From 580a690208321ed45addef5ef12e25b87f9f5dec Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:03:44 +0200 Subject: via-velocity: remove the bounce buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Executive summary: the bounce buffers are in my way - they use something like a 64 * 1500 bytes area of PCI consistent area - they are not resized when the MTU changes - they are used - to hand-pad undersized packets. skb_pad anyone ? - to linearize fragmented skbs whose fragment count goes beyond the 7 fragments hardware limit in order to claim scatter-gather support Actually the SG code is commented out and I wonder if it could not be implemented (ab-)using the large send feature of the chipset since the latter should support some multi-descriptor packet transmitting. Signed-off-by: Francois Romieu Fixed-by: Séguier Régis Signed-off-by: Jeff Garzik --- drivers/net/via-velocity.c | 72 ++++++++++++---------------------------------- drivers/net/via-velocity.h | 5 ---- 2 files changed, 18 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index bcbf2fa9b94a..fce2dfd0e9e6 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1104,7 +1104,6 @@ static int velocity_init_rings(struct velocity_info *vptr) { int i; unsigned int psize; - unsigned int tsize; dma_addr_t pool_dma; u8 *pool; @@ -1133,19 +1132,6 @@ static int velocity_init_rings(struct velocity_info *vptr) vptr->rd_pool_dma = pool_dma; - tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq; - vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize, - &vptr->tx_bufs_dma); - - if (vptr->tx_bufs == NULL) { - printk(KERN_ERR "%s: DMA memory allocation failed.\n", - vptr->dev->name); - pci_free_consistent(vptr->pdev, psize, pool, pool_dma); - return -ENOMEM; - } - - memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq); - i = vptr->options.numrx * sizeof(struct rx_desc); pool += i; pool_dma += i; @@ -1169,16 +1155,10 @@ static int velocity_init_rings(struct velocity_info *vptr) static void velocity_free_rings(struct velocity_info *vptr) { - int size; - - size = vptr->options.numrx * sizeof(struct rx_desc) + - vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + const int size = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); - - size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq; - - pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma); } static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) @@ -1313,10 +1293,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) static int velocity_init_td_ring(struct velocity_info *vptr) { - int i, j; dma_addr_t curr; - struct tx_desc *td; - struct velocity_td_info *td_info; + unsigned int j; /* Init the TD ring entries */ for (j = 0; j < vptr->num_txq; j++) { @@ -1331,14 +1309,6 @@ static int velocity_init_td_ring(struct velocity_info *vptr) return -ENOMEM; } - for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) { - td = &(vptr->td_rings[j][i]); - td_info = &(vptr->td_infos[j][i]); - td_info->buf = vptr->tx_bufs + - (j * vptr->options.numtx + i) * PKT_BUF_SZ; - td_info->buf_dma = vptr->tx_bufs_dma + - (j * vptr->options.numtx + i) * PKT_BUF_SZ; - } vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] 
= 0; } return 0; @@ -1867,7 +1837,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ /* * Don't unmap the pre-allocated tx_bufs */ - if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) { + if (tdinfo->skb_dma) { for (i = 0; i < tdinfo->nskb_dma; i++) { #ifdef VELOCITY_ZERO_COPY_SUPPORT @@ -2063,9 +2033,19 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) struct tx_desc *td_ptr; struct velocity_td_info *tdinfo; unsigned long flags; - int index; int pktlen = skb->len; - __le16 len = cpu_to_le16(pktlen); + __le16 len; + int index; + + + + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out; + pktlen = ETH_ZLEN; + } + + len = cpu_to_le16(pktlen); #ifdef VELOCITY_ZERO_COPY_SUPPORT if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { @@ -2083,23 +2063,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) td_ptr->tdesc1.TCR = TCR0_TIC; td_ptr->td_buf[0].size &= ~TD_QUEUE; - /* - * Pad short frames. - */ - if (pktlen < ETH_ZLEN) { - /* Cannot occur until ZC support */ - pktlen = ETH_ZLEN; - len = cpu_to_le16(ETH_ZLEN); - skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); - memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); - tdinfo->skb = skb; - tdinfo->skb_dma[0] = tdinfo->buf_dma; - td_ptr->tdesc0.len = len; - td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); - td_ptr->td_buf[0].pa_high = 0; - td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ - tdinfo->nskb_dma = 1; - } else #ifdef VELOCITY_ZERO_COPY_SUPPORT if (skb_shinfo(skb)->nr_frags > 0) { int nfrags = skb_shinfo(skb)->nr_frags; @@ -2191,7 +2154,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) } dev->trans_start = jiffies; spin_unlock_irqrestore(&vptr->lock, flags); - return 0; +out: + return NETDEV_TX_OK; } /** diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index 7387be4f428d..86446147284c 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -236,10 +236,8 @@ struct velocity_rd_info { struct velocity_td_info { struct sk_buff *skb; - u8 *buf; int nskb_dma; dma_addr_t skb_dma[7]; - dma_addr_t buf_dma; }; enum velocity_owner { @@ -1506,9 +1504,6 @@ struct velocity_info { dma_addr_t rd_pool_dma; dma_addr_t td_pool_dma[TX_QUEUE_NO]; - dma_addr_t tx_bufs_dma; - u8 *tx_bufs; - struct vlan_group *vlgrp; u8 ip_addr[4]; enum chip_type chip_id; -- cgit v1.2.1 From 8ac53afccf7ab383fd97db8910117ae7892c72a7 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:04:33 +0200 Subject: via-velocity: lean and clean velocity_init_rings - PCI consistent areas need no memset - use dev_err instead of plain printk - avoid a few casts Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/via-velocity.c | 48 ++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fce2dfd0e9e6..86b256cbeaf3 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1102,47 +1102,41 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc static int velocity_init_rings(struct velocity_info *vptr) { - int i; - unsigned int psize; + struct velocity_opt *opt = &vptr->options; + const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); + const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); + struct pci_dev *pdev = vptr->pdev; 
dma_addr_t pool_dma; - u8 *pool; - - /* - * Allocate all RD/TD rings a single pool - */ - - psize = vptr->options.numrx * sizeof(struct rx_desc) + - vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + void *pool; + unsigned int i; /* + * Allocate all RD/TD rings a single pool. + * * pci_alloc_consistent() fulfills the requirement for 64 bytes * alignment */ - pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma); - - if (pool == NULL) { - printk(KERN_ERR "%s : DMA memory allocation failed.\n", - vptr->dev->name); + pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + + rx_ring_size, &pool_dma); + if (!pool) { + dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", + vptr->dev->name); return -ENOMEM; } - memset(pool, 0, psize); - - vptr->rd_ring = (struct rx_desc *) pool; - + vptr->rd_ring = pool; vptr->rd_pool_dma = pool_dma; - i = vptr->options.numrx * sizeof(struct rx_desc); - pool += i; - pool_dma += i; - for (i = 0; i < vptr->num_txq; i++) { - int offset = vptr->options.numtx * sizeof(struct tx_desc); + pool += rx_ring_size; + pool_dma += rx_ring_size; + for (i = 0; i < vptr->num_txq; i++) { + vptr->td_rings[i] = pool; vptr->td_pool_dma[i] = pool_dma; - vptr->td_rings[i] = (struct tx_desc *) pool; - pool += offset; - pool_dma += offset; + pool += tx_ring_size; + pool_dma += tx_ring_size; } + return 0; } -- cgit v1.2.1 From 28133176082d9bcafb5958b8fac80943e51d5eda Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:05:17 +0200 Subject: via-velocity: move residual free rx descriptors count register update Updates of the RBRDU have two different meanings depending on their context: 1. the receiving process has not started - the value which is written into the RBRDU register is supposed to be the free rx descriptors count (rounded to a multiple of 4) 2. the receiving process is running - the value increments the count above (sic) The update is currently issued deep inside the rx replenish chain (see velocity_give_many_rx_descs). Let's propagate enough information to the caller so that the rx replenish functions do not depend on hardware any more. It is needed to perform the Rx/Tx buffers housekeeping when MTU changes. 
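For illustration, the resulting split looks roughly like this (a simplified sketch, not the patch itself; the exact code is in the diff below — velocity_rx_refill() becomes a software-only helper that reports how many descriptors it replenished, and velocity_give_many_rx_descs() stays the single place that writes RBRDU):

    int done = velocity_rx_refill(vptr);       /* allocate skbs, advance
                                                 * rd_dirty/rd_filled only */
    if (done > 0)
        velocity_give_many_rx_descs(vptr);     /* hardware sees the new
                                                 * descriptors here (RBRDU) */

Because the refill step no longer touches the chip, the same code can run while the receiver is quiescent, with the RBRDU update issued later (e.g. from velocity_open()).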
Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/via-velocity.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 86b256cbeaf3..086d69c19920 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1155,7 +1155,7 @@ static void velocity_free_rings(struct velocity_info *vptr) pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); } -static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) +static void velocity_give_many_rx_descs(struct velocity_info *vptr) { struct mac_regs __iomem *regs = vptr->mac_regs; int avail, dirty, unusable; @@ -1182,7 +1182,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) static int velocity_rx_refill(struct velocity_info *vptr) { - int dirty = vptr->rd_dirty, done = 0, ret = 0; + int dirty = vptr->rd_dirty, done = 0; do { struct rx_desc *rd = vptr->rd_ring + dirty; @@ -1192,8 +1192,7 @@ static int velocity_rx_refill(struct velocity_info *vptr) break; if (!vptr->rd_info[dirty].skb) { - ret = velocity_alloc_rx_buf(vptr, dirty); - if (ret < 0) + if (velocity_alloc_rx_buf(vptr, dirty) < 0) break; } done++; @@ -1203,10 +1202,9 @@ static int velocity_rx_refill(struct velocity_info *vptr) if (done) { vptr->rd_dirty = dirty; vptr->rd_filled += done; - velocity_give_many_rx_descs(vptr); } - return ret; + return done; } /** @@ -1219,25 +1217,27 @@ static int velocity_rx_refill(struct velocity_info *vptr) static int velocity_init_rd_ring(struct velocity_info *vptr) { - int ret; int mtu = vptr->dev->mtu; + int ret = -ENOMEM; vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; vptr->rd_info = kcalloc(vptr->options.numrx, sizeof(struct velocity_rd_info), GFP_KERNEL); if (!vptr->rd_info) - return -ENOMEM; + goto out; vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; - ret = velocity_rx_refill(vptr); - if (ret < 0) { + if (velocity_rx_refill(vptr) != vptr->options.numrx) { VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: failed to allocate RX buffer.\n", vptr->dev->name); velocity_free_rd_ring(vptr); + goto out; } + ret = 0; +out: return ret; } @@ -1412,10 +1412,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) vptr->rd_curr = rd_curr; - if (works > 0 && velocity_rx_refill(vptr) < 0) { - VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR - "%s: rx buf allocation failure\n", vptr->dev->name); - } + if ((works > 0) && (velocity_rx_refill(vptr) > 0)) + velocity_give_many_rx_descs(vptr); VAR_USED(stats); return works; @@ -1877,6 +1875,8 @@ static int velocity_open(struct net_device *dev) /* Ensure chip is running */ pci_set_power_state(vptr->pdev, PCI_D0); + velocity_give_many_rx_descs(vptr); + velocity_init_registers(vptr, VELOCITY_INIT_COLD); ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED, -- cgit v1.2.1 From 9088d9a4267ff098d5edc3dad8359c92741b7a6a Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:05:57 +0200 Subject: via-velocity: add velocity_set_rxbufsize helper It removes a dependancy from velocity_init_rd_ring to dev->mtu. 
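For illustration, the helper and its call sites amount to (sketch only; see the diff below):

    static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
    {
        vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
    }

    velocity_set_rxbufsize(vptr, dev->mtu);    /* velocity_open() */
    velocity_set_rxbufsize(vptr, new_mtu);     /* velocity_change_mtu() */

so velocity_init_rd_ring() only consumes vptr->rx_buf_sz and never reads dev->mtu itself.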
Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/via-velocity.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 086d69c19920..370ce30f2f45 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1207,6 +1207,11 @@ static int velocity_rx_refill(struct velocity_info *vptr) return done; } +static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) +{ + vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; +} + /** * velocity_init_rd_ring - set up receive ring * @vptr: velocity to configure @@ -1217,11 +1222,8 @@ static int velocity_rx_refill(struct velocity_info *vptr) static int velocity_init_rd_ring(struct velocity_info *vptr) { - int mtu = vptr->dev->mtu; int ret = -ENOMEM; - vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; - vptr->rd_info = kcalloc(vptr->options.numrx, sizeof(struct velocity_rd_info), GFP_KERNEL); if (!vptr->rd_info) @@ -1860,6 +1862,8 @@ static int velocity_open(struct net_device *dev) struct velocity_info *vptr = netdev_priv(dev); int ret; + velocity_set_rxbufsize(vptr, dev->mtu); + ret = velocity_init_rings(vptr); if (ret < 0) goto out; @@ -1941,6 +1945,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; + velocity_set_rxbufsize(vptr, new_mtu); + ret = velocity_init_rd_ring(vptr); if (ret < 0) goto out_unlock; -- cgit v1.2.1 From 3f78d88575d99e17218a5bb2d79e251a781d16ad Mon Sep 17 00:00:00 2001 From: Sreenivasa Honnur Date: Wed, 9 Jul 2008 23:47:46 -0400 Subject: S2io: Fix IOMMU overflow checking. - Fix IOMMU overflow checking. As reported by Andi Kleen removed check for zero dma address. Signed-off-by: Santosh Rastapur Signed-off-by: Ramkrishna Vepa Signed-off-by: Jeff Garzik --- drivers/net/s2io.c | 41 ++++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 51a91154125d..685030bb1d5e 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -2501,6 +2501,9 @@ static void stop_nic(struct s2io_nic *nic) /** * fill_rx_buffers - Allocates the Rx side skbs * @ring_info: per ring structure + * @from_card_up: If this is true, we will map the buffer to get + * the dma address for buf0 and buf1 to give it to the card. + * Else we will sync the already mapped buffer to give it to the card. * Description: * The function allocates Rx side skbs and puts the physical * address of these buffers into the RxD buffer pointers, so that the NIC @@ -2518,7 +2521,7 @@ static void stop_nic(struct s2io_nic *nic) * SUCCESS on success or an appropriate -ve value on failure. */ -static int fill_rx_buffers(struct ring_info *ring) +static int fill_rx_buffers(struct ring_info *ring, int from_card_up) { struct sk_buff *skb; struct RxD_t *rxdp; @@ -2637,17 +2640,16 @@ static int fill_rx_buffers(struct ring_info *ring) skb->data = (void *) (unsigned long)tmp; skb_reset_tail_pointer(skb); - /* AK: check is wrong. 
0 can be valid dma address */ - if (!(rxdp3->Buffer0_ptr)) + if (from_card_up) { rxdp3->Buffer0_ptr = pci_map_single(ring->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); - else + if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) + goto pci_map_failed; + } else pci_dma_sync_single_for_device(ring->pdev, (dma_addr_t) rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) - goto pci_map_failed; rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (ring->rxd_mode == RXD_MODE_3B) { @@ -2664,21 +2666,22 @@ static int fill_rx_buffers(struct ring_info *ring) if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) goto pci_map_failed; - /* AK: check is wrong */ - if (!rxdp3->Buffer1_ptr) + if (from_card_up) { rxdp3->Buffer1_ptr = pci_map_single(ring->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { - pci_unmap_single - (ring->pdev, - (dma_addr_t)(unsigned long) - skb->data, - ring->mtu + 4, - PCI_DMA_FROMDEVICE); - goto pci_map_failed; + if (pci_dma_mapping_error + (rxdp3->Buffer1_ptr)) { + pci_unmap_single + (ring->pdev, + (dma_addr_t)(unsigned long) + skb->data, + ring->mtu + 4, + PCI_DMA_FROMDEVICE); + goto pci_map_failed; + } } rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3 @@ -2813,7 +2816,7 @@ static void free_rx_buffers(struct s2io_nic *sp) static int s2io_chk_rx_buffers(struct ring_info *ring) { - if (fill_rx_buffers(ring) == -ENOMEM) { + if (fill_rx_buffers(ring, 0) == -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); } @@ -2944,7 +2947,7 @@ static void s2io_netpoll(struct net_device *dev) rx_intr_handler(&mac_control->rings[i], 0); for (i = 0; i < config->rx_ring_num; i++) { - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { + if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); break; @@ -7183,7 +7186,7 @@ static int s2io_card_up(struct s2io_nic * sp) for (i = 0; i < config->rx_ring_num; i++) { mac_control->rings[i].mtu = dev->mtu; - ret = fill_rx_buffers(&mac_control->rings[i]); + ret = fill_rx_buffers(&mac_control->rings[i], 1); if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", dev->name); -- cgit v1.2.1 From 01e16faa0691c57fd8f9bac46b1953ff7692ab8a Mon Sep 17 00:00:00 2001 From: Sreenivasa Honnur Date: Wed, 9 Jul 2008 23:49:21 -0400 Subject: S2io: Enable msi-x link interrupts. - Enable msi-x link interrupts because the timer based scheduler was getting cancelled causing the link state to be lost with repetitive card up/downs when changing the mtu. - Unmask mac_rmac_link interrupts only for Xframe I and prevent a spurious link interrupt in Xframe II. 
- Stop the tx queue and indicate link down when card is down Signed-off-by: Santosh Rastapur Signed-off-by: Ramkrishna Vepa Signed-off-by: Jeff Garzik --- drivers/net/s2io.c | 47 ++++++++++++++++++++++++++++++----------------- drivers/net/s2io.h | 1 + 2 files changed, 31 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 685030bb1d5e..7f89b433bfc3 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -1891,8 +1891,6 @@ static int init_nic(struct s2io_nic *nic) static int s2io_link_fault_indication(struct s2io_nic *nic) { - if (nic->config.intr_type != INTA) - return MAC_RMAC_ERR_TIMER; if (nic->device_type == XFRAME_II_DEVICE) return LINK_UP_DOWN_INTERRUPT; else @@ -1925,7 +1923,9 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) { struct XENA_dev_config __iomem *bar0 = nic->bar0; register u64 gen_int_mask = 0; + u64 interruptible; + writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); if (mask & TX_DMA_INTR) { gen_int_mask |= TXDMA_INT_M; @@ -2015,10 +2015,12 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) gen_int_mask |= RXMAC_INT_M; do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, &bar0->mac_int_mask); - do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | + interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | - RMAC_DOUBLE_ECC_ERR | - RMAC_LINK_STATE_CHANGE_INT, + RMAC_DOUBLE_ECC_ERR; + if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) + interruptible |= RMAC_LINK_STATE_CHANGE_INT; + do_s2io_write_bits(interruptible, flag, &bar0->mac_rmac_err_mask); } @@ -4376,18 +4378,24 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) /* Nothing much can be done. Get out */ return IRQ_HANDLED; - writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); + if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) { + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); - if (reason & GEN_INTR_TXTRAFFIC) - writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); + if (reason & GEN_INTR_TXPIC) + s2io_txpic_intr_handle(sp); - for (i = 0; i < config->tx_fifo_num; i++) - tx_intr_handler(&fifos[i]); + if (reason & GEN_INTR_TXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); - writeq(sp->general_int_mask, &bar0->general_int_mask); - readl(&bar0->general_int_status); + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&fifos[i]); - return IRQ_HANDLED; + writeq(sp->general_int_mask, &bar0->general_int_mask); + readl(&bar0->general_int_status); + return IRQ_HANDLED; + } + /* The interrupt was not raised by us */ + return IRQ_NONE; } static void s2io_txpic_intr_handle(struct s2io_nic *sp) @@ -7115,6 +7123,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) s2io_rem_isr(sp); + /* stop the tx queue, indicate link down */ + s2io_link(sp, LINK_DOWN); + /* Check if the device is Quiescent and then Reset the NIC */ while(do_io) { /* As per the HW requirement we need to replenish the @@ -7247,17 +7258,19 @@ static int s2io_card_up(struct s2io_nic * sp) S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); + set_bit(__S2IO_STATE_CARD_UP, &sp->state); + /* Enable select interrupts */ en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); - if (sp->config.intr_type != INTA) - en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); - else { + if (sp->config.intr_type != INTA) { + interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR; + en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); + } else { interruptible = 
TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; interruptible |= TX_PIC_INTR; en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); } - set_bit(__S2IO_STATE_CARD_UP, &sp->state); return 0; } diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 483b17c34ae8..6722a2f7d091 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -1107,6 +1107,7 @@ static int init_shared_mem(struct s2io_nic *sp); static void free_shared_mem(struct s2io_nic *sp); static int init_nic(struct s2io_nic *nic); static int rx_intr_handler(struct ring_info *ring_data, int budget); +static void s2io_txpic_intr_handle(struct s2io_nic *sp); static void tx_intr_handler(struct fifo_info *fifo_data); static void s2io_handle_errors(void * dev_id); -- cgit v1.2.1 From 29d0a2b0f7b5f53c79095a5539ee3884afb354b6 Mon Sep 17 00:00:00 2001 From: Sreenivasa Honnur Date: Wed, 9 Jul 2008 23:50:13 -0400 Subject: S2io: Version update for IOMMU overflow checking and enable msi-x link interrupts patches. - Updated version number Signed-off-by: Santosh Rastapur Signed-off-by: Ramkrishna Vepa Signed-off-by: Jeff Garzik --- drivers/net/s2io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 7f89b433bfc3..517425dcb77c 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -86,7 +86,7 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.26.24" +#define DRV_VERSION "2.0.26.25" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; -- cgit v1.2.1 From 4422b00390749f8b877b2838a99ef2948ae08a58 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:29:19 +0200 Subject: cxgb: delete non NAPI code from the driver. Compile-tested only. Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 8 ------ drivers/net/chelsio/cxgb2.c | 2 -- drivers/net/chelsio/sge.c | 70 ++++----------------------------------------- 3 files changed, 5 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 4675c1bd6fb9..50ca1cf1271e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2378,14 +2378,6 @@ config CHELSIO_T1_1G Enables support for Chelsio's gigabit Ethernet PCI cards. If you are using only 10G cards say 'N' here. -config CHELSIO_T1_NAPI - bool "Use Rx Polling (NAPI)" - depends on CHELSIO_T1 - default y - help - NAPI is a driver API designed to reduce CPU and interrupt load - when the driver is receiving lots of packets from the card. 
- config CHELSIO_T3 tristate "Chelsio Communications T3 10Gb Ethernet support" depends on PCI && INET diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index a509337eab2d..638c9a27a7a6 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c @@ -1153,9 +1153,7 @@ static int __devinit init_one(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = t1_netpoll; #endif -#ifdef CONFIG_CHELSIO_T1_NAPI netif_napi_add(netdev, &adapter->napi, t1_poll, 64); -#endif SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); } diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 8a7efd38e95b..d6c7d2aa761b 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) if (unlikely(adapter->vlan_grp && p->vlan_valid)) { st->vlan_xtract++; -#ifdef CONFIG_CHELSIO_T1_NAPI - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, - ntohs(p->vlan)); -#else - vlan_hwaccel_rx(skb, adapter->vlan_grp, - ntohs(p->vlan)); -#endif - } else { -#ifdef CONFIG_CHELSIO_T1_NAPI + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, + ntohs(p->vlan)); + } else netif_receive_skb(skb); -#else - netif_rx(skb); -#endif - } } /* @@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter) return (e->GenerationBit == Q->genbit); } -#ifdef CONFIG_CHELSIO_T1_NAPI /* * A simpler version of process_responses() that handles only pure (i.e., * non data-carrying) responses. Such respones are too light-weight to justify @@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget) return work_done; } -/* - * NAPI version of the main interrupt handler. - */ irqreturn_t t1_interrupt(int irq, void *data) { struct adapter *adapter = data; @@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data) else { /* no data, no NAPI needed */ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); - napi_enable(&adapter->napi); /* undo schedule_prep */ + /* undo schedule_prep */ + napi_enable(&adapter->napi); } } return IRQ_HANDLED; @@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data) return IRQ_RETVAL(handled != 0); } -#else -/* - * Main interrupt handler, optimized assuming that we took a 'DATA' - * interrupt. - * - * 1. Clear the interrupt - * 2. Loop while we find valid descriptors and process them; accumulate - * information that can be processed after the loop - * 3. Tell the SGE at which index we stopped processing descriptors - * 4. Bookkeeping; free TX buffers, ring doorbell if there are any - * outstanding TX buffers waiting, replenish RX buffers, potentially - * reenable upper layers if they were turned off due to lack of TX - * resources which are available again. - * 5. If we took an interrupt, but no valid respQ descriptors was found we - * let the slow_intr_handler run and do error handling. - */ -irqreturn_t t1_interrupt(int irq, void *cookie) -{ - int work_done; - struct adapter *adapter = cookie; - struct respQ *Q = &adapter->sge->respQ; - - spin_lock(&adapter->async_lock); - - writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); - - if (likely(responses_pending(adapter))) - work_done = process_responses(adapter, -1); - else - work_done = t1_slow_intr_handler(adapter); - - /* - * The unconditional clearing of the PL_CAUSE above may have raced - * with DMA completion and the corresponding generation of a response - * to cause us to miss the resulting data interrupt. 
The next write - * is also unconditional to recover the missed interrupt and render - * this race harmless. - */ - writel(Q->cidx, adapter->regs + A_SG_SLEEPING); - - if (!work_done) - adapter->sge->stats.unhandled_irqs++; - spin_unlock(&adapter->async_lock); - return IRQ_RETVAL(work_done != 0); -} -#endif - /* * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. * -- cgit v1.2.1 From 32b0f53e5bc80b87fd20d4d78a0e0cb602c9157a Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:30:14 +0200 Subject: via-rhine: delete non NAPI code from the driver. Compile-tested only. Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 11 ----------- drivers/net/via-rhine.c | 27 ++------------------------- 2 files changed, 2 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 50ca1cf1271e..fb618b6e88eb 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1694,17 +1694,6 @@ config VIA_RHINE_MMIO If unsure, say Y. -config VIA_RHINE_NAPI - bool "Use Rx Polling (NAPI)" - depends on VIA_RHINE - help - NAPI is a new driver API designed to reduce CPU and interrupt load - when the driver is receiving lots of packets from the card. - - If your estimated Rx load is 10kpps or more, or if the card will be - deployed on potentially unfriendly networks (e.g. in a firewall), - then say Y here. - config LAN_SAA9730 bool "Philips SAA9730 Ethernet support" depends on NET_PCI && PCI && MIPS_ATLAS diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 8c9d6ae2bb31..96dff04334b8 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -73,12 +73,7 @@ static const int multicast_filter_limit = 32; There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ -#ifdef CONFIG_VIA_RHINE_NAPI #define RX_RING_SIZE 64 -#else -#define RX_RING_SIZE 16 -#endif - /* Operational parameters that usually are not changed. */ @@ -583,7 +578,6 @@ static void rhine_poll(struct net_device *dev) } #endif -#ifdef CONFIG_VIA_RHINE_NAPI static int rhine_napipoll(struct napi_struct *napi, int budget) { struct rhine_private *rp = container_of(napi, struct rhine_private, napi); @@ -604,7 +598,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) } return work_done; } -#endif static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr) { @@ -784,9 +777,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = rhine_poll; #endif -#ifdef CONFIG_VIA_RHINE_NAPI netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); -#endif + if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; @@ -1056,9 +1048,7 @@ static void init_registers(struct net_device *dev) rhine_set_rx_mode(dev); -#ifdef CONFIG_VIA_RHINE_NAPI napi_enable(&rp->napi); -#endif /* Enable interrupts by setting the interrupt mask. 
*/ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | @@ -1193,9 +1183,7 @@ static void rhine_tx_timeout(struct net_device *dev) /* protect against concurrent rx interrupts */ disable_irq(rp->pdev->irq); -#ifdef CONFIG_VIA_RHINE_NAPI napi_disable(&rp->napi); -#endif spin_lock(&rp->lock); @@ -1319,16 +1307,12 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance) if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { -#ifdef CONFIG_VIA_RHINE_NAPI iowrite16(IntrTxAborted | IntrTxDone | IntrTxError | IntrTxUnderrun | IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); netif_rx_schedule(dev, &rp->napi); -#else - rhine_rx(dev, RX_RING_SIZE); -#endif } if (intr_status & (IntrTxErrSummary | IntrTxDone)) { @@ -1520,11 +1504,7 @@ static int rhine_rx(struct net_device *dev, int limit) PCI_DMA_FROMDEVICE); } skb->protocol = eth_type_trans(skb, dev); -#ifdef CONFIG_VIA_RHINE_NAPI netif_receive_skb(skb); -#else - netif_rx(skb); -#endif dev->last_rx = jiffies; rp->stats.rx_bytes += pkt_len; rp->stats.rx_packets++; @@ -1836,9 +1816,7 @@ static int rhine_close(struct net_device *dev) spin_lock_irq(&rp->lock); netif_stop_queue(dev); -#ifdef CONFIG_VIA_RHINE_NAPI napi_disable(&rp->napi); -#endif if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, " @@ -1937,9 +1915,8 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state) if (!netif_running(dev)) return 0; -#ifdef CONFIG_VIA_RHINE_NAPI napi_disable(&rp->napi); -#endif + netif_device_detach(dev); pci_save_state(pdev); -- cgit v1.2.1 From 0aa1538f4e4bab366023f4c414555e4ed25994e6 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:33:52 +0200 Subject: gianfar: delete non NAPI code from the driver. Compile-tested only. Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 4 --- drivers/net/gianfar.c | 76 ++++----------------------------------------------- drivers/net/gianfar.h | 11 -------- 3 files changed, 6 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index fb618b6e88eb..1fe7e0792c1f 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2262,10 +2262,6 @@ config GIANFAR This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, and MPC86xx family of chips, and the FEC on the 8540. -config GFAR_NAPI - bool "Use Rx Polling (NAPI)" - depends on GIANFAR - config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 393a0f175302..fa78d6870124 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -44,8 +44,7 @@ * happen immediately, but will wait until either a set number * of frames or amount of time have passed). In NAPI, the * interrupt handler will signal there is work to be done, and - * exit. Without NAPI, the packet(s) will be handled - * immediately. Both methods will start at the last known empty + * exit. 
This method will start at the last known empty * descriptor, and process every subsequent descriptor until there * are none left with data (NAPI will stop after a set number of * packets to give time to other tasks, but will eventually @@ -101,12 +100,6 @@ #undef BRIEF_GFAR_ERRORS #undef VERBOSE_GFAR_ERRORS -#ifdef CONFIG_GFAR_NAPI -#define RECEIVE(x) netif_receive_skb(x) -#else -#define RECEIVE(x) netif_rx(x) -#endif - const char gfar_driver_name[] = "Gianfar Ethernet"; const char gfar_driver_version[] = "1.3"; @@ -131,9 +124,7 @@ static void free_skb_resources(struct gfar_private *priv); static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); static void gfar_configure_serdes(struct net_device *dev); -#ifdef CONFIG_GFAR_NAPI static int gfar_poll(struct napi_struct *napi, int budget); -#endif #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif @@ -260,9 +251,7 @@ static int gfar_probe(struct platform_device *pdev) dev->hard_start_xmit = gfar_start_xmit; dev->tx_timeout = gfar_timeout; dev->watchdog_timeo = TX_TIMEOUT; -#ifdef CONFIG_GFAR_NAPI netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); -#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = gfar_netpoll; #endif @@ -363,11 +352,7 @@ static int gfar_probe(struct platform_device *pdev) /* Even more device info helps when determining which kernel */ /* provided which set of benchmarks. */ -#ifdef CONFIG_GFAR_NAPI printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); -#else - printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name); -#endif printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", dev->name, priv->rx_ring_size, priv->tx_ring_size); @@ -945,14 +930,10 @@ tx_skb_fail: /* Returns 0 for success. 
*/ static int gfar_enet_open(struct net_device *dev) { -#ifdef CONFIG_GFAR_NAPI struct gfar_private *priv = netdev_priv(dev); -#endif int err; -#ifdef CONFIG_GFAR_NAPI napi_enable(&priv->napi); -#endif /* Initialize a bunch of registers */ init_registers(dev); @@ -962,17 +943,13 @@ static int gfar_enet_open(struct net_device *dev) err = init_phy(dev); if(err) { -#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); -#endif return err; } err = startup_gfar(dev); if (err) { -#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); -#endif return err; } @@ -1128,9 +1105,7 @@ static int gfar_close(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); -#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); -#endif stop_gfar(dev); @@ -1427,14 +1402,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct gfar_private *priv = netdev_priv(dev); -#ifdef CONFIG_GFAR_NAPI u32 tempval; -#else - unsigned long flags; -#endif /* support NAPI */ -#ifdef CONFIG_GFAR_NAPI /* Clear IEVENT, so interrupts aren't called again * because of the packets that have already arrived */ gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); @@ -1451,38 +1421,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id) dev->name, gfar_read(&priv->regs->ievent), gfar_read(&priv->regs->imask)); } -#else - /* Clear IEVENT, so rx interrupt isn't called again - * because of this interrupt */ - gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); - - spin_lock_irqsave(&priv->rxlock, flags); - gfar_clean_rx_ring(dev, priv->rx_ring_size); - - /* If we are coalescing interrupts, update the timer */ - /* Otherwise, clear it */ - if (likely(priv->rxcoalescing)) { - gfar_write(&priv->regs->rxic, 0); - gfar_write(&priv->regs->rxic, - mk_ic_value(priv->rxcount, priv->rxtime)); - } - - spin_unlock_irqrestore(&priv->rxlock, flags); -#endif return IRQ_HANDLED; } -static inline int gfar_rx_vlan(struct sk_buff *skb, - struct vlan_group *vlgrp, unsigned short vlctl) -{ -#ifdef CONFIG_GFAR_NAPI - return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl); -#else - return vlan_hwaccel_rx(skb, vlgrp, vlctl); -#endif -} - static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums @@ -1539,10 +1481,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, skb->protocol = eth_type_trans(skb, dev); /* Send the packet up the stack */ - if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) - ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl); - else - ret = RECEIVE(skb); + if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) { + ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, + fcb->vlctl); + } else + ret = netif_receive_skb(skb); if (NET_RX_DROP == ret) priv->extra_stats.kernel_dropped++; @@ -1629,7 +1572,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) return howmany; } -#ifdef CONFIG_GFAR_NAPI static int gfar_poll(struct napi_struct *napi, int budget) { struct gfar_private *priv = container_of(napi, struct gfar_private, napi); @@ -1664,7 +1606,6 @@ static int gfar_poll(struct napi_struct *napi, int budget) return howmany; } -#endif #ifdef CONFIG_NET_POLL_CONTROLLER /* @@ -2003,11 +1944,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id) gfar_receive(irq, dev_id); -#ifndef CONFIG_GFAR_NAPI - /* Clear the halt bit in RSTAT */ - gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); -#endif - if (netif_msg_rx_err(priv)) printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", dev->name, 
gfar_read(&priv->regs->rstat)); diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 27f37c81e52c..bead71cb2b16 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -77,13 +77,8 @@ extern const char gfar_driver_name[]; extern const char gfar_driver_version[]; /* These need to be powers of 2 for this driver */ -#ifdef CONFIG_GFAR_NAPI #define DEFAULT_TX_RING_SIZE 256 #define DEFAULT_RX_RING_SIZE 256 -#else -#define DEFAULT_TX_RING_SIZE 64 -#define DEFAULT_RX_RING_SIZE 64 -#endif #define GFAR_RX_MAX_RING_SIZE 256 #define GFAR_TX_MAX_RING_SIZE 256 @@ -128,14 +123,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_RXTIME 21 -/* Non NAPI Case */ -#ifndef CONFIG_GFAR_NAPI -#define DEFAULT_RX_COALESCE 1 -#define DEFAULT_RXCOUNT 16 -#else #define DEFAULT_RX_COALESCE 0 #define DEFAULT_RXCOUNT 0 -#endif /* CONFIG_GFAR_NAPI */ #define MIIMCFG_INIT_VALUE 0x00000007 #define MIIMCFG_RESET 0x80000000 -- cgit v1.2.1 From 1a342d224afb03196e3df28a271f3ddf3787e8f4 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 11 Jul 2008 00:34:40 +0200 Subject: ucc_geth: delete non NAPI code from the driver. Signed-off-by: Francois Romieu Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 4 ---- drivers/net/ucc_geth.c | 29 ++--------------------------- 2 files changed, 2 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 1fe7e0792c1f..9b98714889bf 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2270,10 +2270,6 @@ config UCC_GETH This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. -config UGETH_NAPI - bool "Use Rx Polling (NAPI)" - depends on UCC_GETH - config UGETH_MAGIC_PACKET bool "Magic Packet detection support" depends on UCC_GETH diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index fb0b918e5ccb..07933f71b86d 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -3500,11 +3500,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit dev->stats.rx_bytes += length; /* Send the packet up the stack */ -#ifdef CONFIG_UGETH_NAPI netif_receive_skb(skb); -#else - netif_rx(skb); -#endif /* CONFIG_UGETH_NAPI */ } ugeth->dev->last_rx = jiffies; @@ -3580,7 +3576,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) return 0; } -#ifdef CONFIG_UGETH_NAPI static int ucc_geth_poll(struct napi_struct *napi, int budget) { struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); @@ -3607,7 +3602,6 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) return howmany; } -#endif /* CONFIG_UGETH_NAPI */ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) { @@ -3617,9 +3611,6 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) struct ucc_geth_info *ug_info; register u32 ucce; register u32 uccm; -#ifndef CONFIG_UGETH_NAPI - register u32 rx_mask; -#endif register u32 tx_mask; u8 i; @@ -3636,21 +3627,11 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) /* check for receive events that require processing */ if (ucce & UCCE_RX_EVENTS) { -#ifdef CONFIG_UGETH_NAPI if (netif_rx_schedule_prep(dev, &ugeth->napi)) { uccm &= ~UCCE_RX_EVENTS; out_be32(uccf->p_uccm, uccm); __netif_rx_schedule(dev, &ugeth->napi); } -#else - rx_mask = UCCE_RXBF_SINGLE_MASK; - for (i = 0; i < ug_info->numQueuesRx; i++) { - if (ucce & rx_mask) - ucc_geth_rx(ugeth, i, (int)ugeth->ug_info->bdRingLenRx[i]); - ucce &= ~rx_mask; - rx_mask <<= 1; - } -#endif /* 
CONFIG_UGETH_NAPI */ } /* Tx event processing */ @@ -3720,9 +3701,8 @@ static int ucc_geth_open(struct net_device *dev) return err; } -#ifdef CONFIG_UGETH_NAPI napi_enable(&ugeth->napi); -#endif + err = ucc_geth_startup(ugeth); if (err) { if (netif_msg_ifup(ugeth)) @@ -3783,9 +3763,8 @@ static int ucc_geth_open(struct net_device *dev) return err; out_err: -#ifdef CONFIG_UGETH_NAPI napi_disable(&ugeth->napi); -#endif + return err; } @@ -3796,9 +3775,7 @@ static int ucc_geth_close(struct net_device *dev) ugeth_vdbg("%s: IN", __FUNCTION__); -#ifdef CONFIG_UGETH_NAPI napi_disable(&ugeth->napi); -#endif ucc_geth_stop(ugeth); @@ -4050,9 +4027,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma dev->hard_start_xmit = ucc_geth_start_xmit; dev->tx_timeout = ucc_geth_timeout; dev->watchdog_timeo = TX_TIMEOUT; -#ifdef CONFIG_UGETH_NAPI netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); -#endif /* CONFIG_UGETH_NAPI */ #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = ucc_netpoll; #endif -- cgit v1.2.1 From 0f8ecbadae4bd9f085e605c08347ed3077a6146f Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:07 -0700 Subject: ixgb: maybe stop tx port missed a piece back when maybe stop tx was added to the ixgb driver some mistakes were made and the driver a) didn't remove the tx lock, which is now un-necessary b) didn't change the restart code to be compliant with maybe_stop Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 41f3adf5f375..04cac41f7d5e 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1855,12 +1855,17 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) tx_ring->next_to_clean = i; - if (unlikely(netif_queue_stopped(netdev))) { - spin_lock(&adapter->tx_lock); - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && - (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) + if (unlikely(cleaned && netif_carrier_ok(netdev) && + IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__IXGB_DOWN, &adapter->flags))) { netif_wake_queue(netdev); - spin_unlock(&adapter->tx_lock); + ++adapter->restart_queue; + } } if(adapter->detect_tx_hung) { -- cgit v1.2.1 From e539e4667e3c8125641f5916eb0b7d087d3e0844 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:12 -0700 Subject: ixgb: repeat 32 bit ioremap cleanup this patch has been made to many other drivers in kernel to fix the storage of 64 bit resources in 32 bit variables. 
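The pattern being fixed, sketched (illustrative only; on configurations where PCI resource addresses are wider than unsigned long, the intermediate store truncates the BAR address):

    /* before: a possibly 64-bit resource squeezed through 32-bit variables */
    unsigned long mmio_start = pci_resource_start(pdev, BAR_0); /* may truncate */
    int mmio_len = pci_resource_len(pdev, BAR_0);
    adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);

    /* after (as in the diff below): pass the resource values straight
     * to ioremap() so no narrowing store ever happens */
    adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
                                  pci_resource_len(pdev, BAR_0));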
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 04cac41f7d5e..652c0eab3a4a 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -363,8 +363,6 @@ ixgb_probe(struct pci_dev *pdev, struct net_device *netdev = NULL; struct ixgb_adapter *adapter; static int cards_found = 0; - unsigned long mmio_start; - int mmio_len; int pci_using_dac; int i; int err; @@ -405,11 +403,9 @@ ixgb_probe(struct pci_dev *pdev, adapter->hw.back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); - mmio_start = pci_resource_start(pdev, BAR_0); - mmio_len = pci_resource_len(pdev, BAR_0); - - adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); - if(!adapter->hw.hw_addr) { + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0), + pci_resource_len(pdev, BAR_0)); + if (!adapter->hw.hw_addr) { err = -EIO; goto err_ioremap; } @@ -444,9 +440,6 @@ ixgb_probe(struct pci_dev *pdev, #endif strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len; - netdev->base_addr = adapter->hw.io_base; adapter->bd_number = cards_found; adapter->link_speed = 0; -- cgit v1.2.1 From 4360386f7d849f521e8ef042f90dbca73e07509c Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:17 -0700 Subject: ixgb: fix bug in descriptor ring due to prefetch corruption there was one more bug hidden in the prefetch routines in ixgb hardware that force us to remove it completely. Writebacks were being done on descriptors with stale data due to internal hardware fifo corruption. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 652c0eab3a4a..ecd5252c4bf4 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -146,14 +146,6 @@ static int debug = DEFAULT_DEBUG_LEVEL_SHIFT; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -/* some defines for controlling descriptor fetches in h/w */ -#define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */ -#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below - * this */ -#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail - * is pushed this many descriptors - * from head */ - /** * ixgb_init_module - Driver Registration Routine * @@ -839,7 +831,6 @@ ixgb_configure_rx(struct ixgb_adapter *adapter) struct ixgb_hw *hw = &adapter->hw; u32 rctl; u32 rxcsum; - u32 rxdctl; /* make sure receives are disabled while setting up the descriptors */ @@ -861,18 +852,12 @@ ixgb_configure_rx(struct ixgb_adapter *adapter) IXGB_WRITE_REG(hw, RDH, 0); IXGB_WRITE_REG(hw, RDT, 0); - /* set up pre-fetching of receive buffers so we get some before we - * run out (default hardware behavior is to run out before fetching - * more). This sets up to fetch if HTHRESH rx descriptors are avail - * and the descriptors in hw cache are below PTHRESH. 
This avoids - * the hardware behavior of fetching <=512 descriptors in a single - * burst that pre-empts all other activity, usually causing fifo - * overflows. */ - /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */ - rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT | - RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT | - RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT; - IXGB_WRITE_REG(hw, RXDCTL, rxdctl); + /* due to the hardware errata with RXDCTL, we are unable to use any of + * the performance enhancing features of it without causing other + * subtle bugs, some of the bugs could include receive length + * corruption at high data rates (WTHRESH > 0) and/or receive + * descriptor ring irregularites (particularly in hardware cache) */ + IXGB_WRITE_REG(hw, RXDCTL, 0); /* Enable Receive Checksum Offload for TCP and UDP */ if (adapter->rx_csum) { -- cgit v1.2.1 From a3dc3da02a7c29237ca4ce3ec17f0e1f87ff0a06 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:22 -0700 Subject: ixgb: leave room for extra hardware memory usage ixgb hardware (not ixgbe) has a problem where it might dma past the end of a buffer in certain cases. leave 8 bytes extra room. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index ecd5252c4bf4..ec8bce154364 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -560,7 +560,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter) hw->subsystem_id = pdev->subsystem_device; hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; - adapter->rx_buffer_len = hw->max_frame_size; + adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ if((hw->device_id == IXGB_DEVICE_ID_82597EX) || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) @@ -1573,7 +1573,7 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - adapter->rx_buffer_len = max_frame; + adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */ netdev->mtu = new_mtu; -- cgit v1.2.1 From 72ab51954d108e4999b3c958ae21da731d2d789a Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:27 -0700 Subject: ixgb: check down state before enable irq in order to prevent the case where poll_disable is waiting on our device to permanently, check the flag to make sure we're not down or closing down before re-enabling interrupts. 
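The guarded completion path, sketched (it mirrors the ixgb_clean() hunk in the diff below):

    /* If the budget was not exhausted, leave polling mode, but only
     * re-arm interrupts while the interface is still up; otherwise a
     * pending poll/napi disable could end up waiting on us indefinitely. */
    if (work_done < budget) {
        netif_rx_complete(netdev, napi);
        if (!test_bit(__IXGB_DOWN, &adapter->flags))
            ixgb_irq_enable(adapter);
    }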
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index ec8bce154364..60cad65dbc2e 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1782,7 +1782,8 @@ ixgb_clean(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { netif_rx_complete(netdev, napi); - ixgb_irq_enable(adapter); + if (!test_bit(__IXGB_DOWN, &adapter->flags)) + ixgb_irq_enable(adapter); } return work_done; -- cgit v1.2.1 From a65a604a01f710defe01f2322e108f4d8c20f6ce Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:32 -0700 Subject: ixgb: don't allow too small MTU Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 60cad65dbc2e..0e72dadf8b70 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1566,9 +1566,9 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; - - if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) - || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { + /* MTU < 68 is an error for IPv4 traffic, just don't allow it */ + if ((new_mtu < 68) || + (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu); return -EINVAL; } -- cgit v1.2.1 From 34336635467c5102777ea8acf34fc8bf391f98c0 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:37 -0700 Subject: ixgb: move time stamp set before setting dma pointer a user pointed out that setting variables out of order with respect to the checks we make for tx timeout handling could result in a race where ->dma was set but ->time_stamp was set to the old value. 
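The idea behind the reordering is that any path which observes a non-zero ->dma should also observe a current ->time_stamp; sketched (mirrors the diff below):

    buffer_info->length = size;
    buffer_info->time_stamp = jiffies;              /* set the stamp first ... */
    buffer_info->dma = pci_map_single(adapter->pdev,
                                      skb->data + offset,
                                      size, PCI_DMA_TODEVICE);
                                                    /* ... then publish the mapping */
    buffer_info->next_to_watch = 0;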
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 0e72dadf8b70..4bf6bbc65822 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1290,12 +1290,12 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, buffer_info->length = size; WARN_ON(buffer_info->dma != 0); + buffer_info->time_stamp = jiffies; buffer_info->dma = pci_map_single(adapter->pdev, skb->data + offset, size, PCI_DMA_TODEVICE); - buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = 0; len -= size; @@ -1322,13 +1322,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, size -= 4; buffer_info->length = size; + buffer_info->time_stamp = jiffies; buffer_info->dma = pci_map_page(adapter->pdev, frag->page, frag->page_offset + offset, size, PCI_DMA_TODEVICE); - buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = 0; len -= size; -- cgit v1.2.1 From b5ca88eb335580dc48ef5ebb3b86719bdb5a4113 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:42 -0700 Subject: ixgb: fix race on rx_buffer_len in mtu change some random coverage testing found that when changing mtu under heavy traffic load, NAPI would use the rx_buffer_len variable after it had been changed by change_mtu. Similar to e1000 bugs found long ago. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 4bf6bbc65822..c3234c451043 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1573,14 +1573,18 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } + if (old_max_frame == max_frame) + return 0; + + if (netif_running(netdev)) + ixgb_down(adapter, true); + adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */ netdev->mtu = new_mtu; - if ((old_max_frame != max_frame) && netif_running(netdev)) { - ixgb_down(adapter, true); + if (netif_running(netdev)) ixgb_up(adapter); - } return 0; } -- cgit v1.2.1 From 1257969724e7d5f878ac05067388ac5c012eb29b Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:47 -0700 Subject: ixgb: fix unload race with timers ixgb needs to call flush scheduled work to flush any timers before unregistering the netdev. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c3234c451043..79082eb577c6 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -528,6 +528,8 @@ ixgb_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); + flush_scheduled_work(); + unregister_netdev(netdev); iounmap(adapter->hw.hw_addr); -- cgit v1.2.1 From db58294416d5d1446cbf6962a21ad077919d564e Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:51:52 -0700 Subject: ixgb: remove lltx support and update tx routine a) kernel developers suggest LLTX is broken and unsafe to use, remove it. 
b) remember to pre-stop the queue if we won't have room c) removing lltx means we can remove our tx lock Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb.h | 1 - drivers/net/ixgb/ixgb_main.c | 32 +------------------------------- 2 files changed, 1 insertion(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 16f9c756aa46..3cec7b98d52f 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -157,7 +157,6 @@ struct ixgb_adapter { u32 part_num; u16 link_speed; u16 link_duplex; - spinlock_t tx_lock; struct work_struct tx_timeout_task; struct timer_list blink_timer; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 79082eb577c6..bd08e1992140 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -448,9 +448,6 @@ ixgb_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_TSO; -#ifdef NETIF_F_LLTX - netdev->features |= NETIF_F_LLTX; -#endif if(pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -577,8 +574,6 @@ ixgb_sw_init(struct ixgb_adapter *adapter) /* enable flow control to be programmed */ hw->fc.send_xon = 1; - spin_lock_init(&adapter->tx_lock); - set_bit(__IXGB_DOWN, &adapter->flags); return 0; } @@ -1441,7 +1436,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct ixgb_adapter *adapter = netdev_priv(netdev); unsigned int first; unsigned int tx_flags = 0; - unsigned long flags; int vlan_id = 0; int tso; @@ -1455,26 +1449,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return 0; } -#ifdef NETIF_F_LLTX - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { - /* Collision - tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } -#else - spin_lock_irqsave(&adapter->tx_lock, flags); -#endif - if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, - DESC_NEEDED))) { - netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); + DESC_NEEDED))) return NETDEV_TX_BUSY; - } - -#ifndef NETIF_F_LLTX - spin_unlock_irqrestore(&adapter->tx_lock, flags); -#endif if(adapter->vlgrp && vlan_tx_tag_present(skb)) { tx_flags |= IXGB_TX_FLAGS_VLAN; @@ -1486,9 +1463,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tso = ixgb_tso(adapter, skb); if (tso < 0) { dev_kfree_skb_any(skb); -#ifdef NETIF_F_LLTX - spin_unlock_irqrestore(&adapter->tx_lock, flags); -#endif return NETDEV_TX_OK; } @@ -1502,13 +1476,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) netdev->trans_start = jiffies; -#ifdef NETIF_F_LLTX /* Make sure there is space in the ring for the next send. */ ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); - spin_unlock_irqrestore(&adapter->tx_lock, flags); - -#endif return NETDEV_TX_OK; } -- cgit v1.2.1 From d9fed18bf9090bd0682f3c611c40261a98a2842d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:02 -0700 Subject: ixgb: add copybreak parameter copybreak code was already in the driver, allow the user to turn it off if they don't like it. 
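The idea being made tunable here is simple: frames shorter than the threshold are copied into a right-sized buffer so the large receive buffer can be handed straight back to the hardware. A minimal userspace sketch of that decision (the 256-byte default matches the old hard-coded IXGB_CB_LENGTH in the patch below; everything else is hypothetical, not driver code):

#include <stdlib.h>
#include <string.h>

static unsigned int copybreak = 256;	/* runtime-tunable threshold */

/* Return the buffer the stack will own: a fresh small copy when the
 * frame is short, otherwise the original large receive buffer. */
static void *receive_frame(void *rx_buf, size_t len, int *recycled)
{
	if (len < copybreak) {
		void *copy = malloc(len);

		if (copy) {
			memcpy(copy, rx_buf, len);
			*recycled = 1;	/* rx_buf can be reused for DMA */
			return copy;
		}
	}
	*recycled = 0;	/* hand the large buffer up as-is */
	return rx_buf;
}

int main(void)
{
	char rx_buf[2048] = "small frame";
	int recycled;
	void *skb = receive_frame(rx_buf, 12, &recycled);

	if (skb != rx_buf)
		free(skb);
	return recycled ? 0 : 1;
}

Setting copybreak to 0 effectively disables the copy path, which is what the new module parameter lets users do.
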
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index bd08e1992140..4e422c4cbe83 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -40,6 +40,12 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; const char ixgb_driver_version[] = DRV_VERSION; static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; +#define IXGB_CB_LENGTH 256 +static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + /* ixgb_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last @@ -1976,8 +1982,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) /* code added for copybreak, this should improve * performance for small packets with large amounts * of reassembly being done in the stack */ -#define IXGB_CB_LENGTH 256 - if (length < IXGB_CB_LENGTH) { + if (length < copybreak) { struct sk_buff *new_skb = netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { -- cgit v1.2.1 From 7490d71a9245fd59e6cd5732cba4d6b744db581a Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:08 -0700 Subject: ixgb: clean up un-necessary declarations Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 4e422c4cbe83..fc2cf0edb7e5 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -61,7 +61,7 @@ static struct pci_device_id ixgb_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, + {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* required last entry */ @@ -71,16 +71,6 @@ static struct pci_device_id ixgb_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl); /* Local Function Prototypes */ - -int ixgb_up(struct ixgb_adapter *adapter); -void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); -void ixgb_reset(struct ixgb_adapter *adapter); -int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); -int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); -void ixgb_free_tx_resources(struct ixgb_adapter *adapter); -void ixgb_free_rx_resources(struct ixgb_adapter *adapter); -void ixgb_update_stats(struct ixgb_adapter *adapter); - static int ixgb_init_module(void); static void ixgb_exit_module(void); static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -103,17 +93,18 @@ static irqreturn_t ixgb_intr(int irq, void *data); static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); #ifdef CONFIG_IXGB_NAPI -static int ixgb_clean(struct napi_struct *napi, int budget); -static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter, - int *work_done, int work_to_do); +static int ixgb_clean(struct napi_struct *, int); +static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); #else -static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter); +static bool ixgb_clean_rx_irq(struct ixgb_adapter 
*); #endif -static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); +static void ixgb_alloc_rx_buffers(struct ixgb_adapter *); + static void ixgb_tx_timeout(struct net_device *dev); static void ixgb_tx_timeout_task(struct work_struct *work); + static void ixgb_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp); + struct vlan_group *grp); static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter); @@ -124,7 +115,7 @@ static void ixgb_netpoll(struct net_device *dev); #endif static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, - enum pci_channel_state state); + enum pci_channel_state state); static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); static void ixgb_io_resume (struct pci_dev *pdev); -- cgit v1.2.1 From 03f83041d836022a17258c2731f6221f248bedcb Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:13 -0700 Subject: ixgb: format all if( to be if ( this patch is trivial but because I want to have everything be nice and tidy I'm updating it. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_ee.c | 8 +-- drivers/net/ixgb/ixgb_ethtool.c | 68 +++++++++---------- drivers/net/ixgb/ixgb_hw.c | 13 ++-- drivers/net/ixgb/ixgb_main.c | 141 ++++++++++++++++++++-------------------- drivers/net/ixgb/ixgb_osdep.h | 2 +- drivers/net/ixgb/ixgb_param.c | 30 ++++----- 6 files changed, 130 insertions(+), 132 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 2f7ed52c7502..ba41d2a54c81 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, */ eecd_reg &= ~IXGB_EECD_DI; - if(data & mask) + if (data & mask) eecd_reg |= IXGB_EECD_DI; IXGB_WRITE_REG(hw, EECD, eecd_reg); @@ -159,7 +159,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw) eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg &= ~(IXGB_EECD_DI); - if(eecd_reg & IXGB_EECD_DO) + if (eecd_reg & IXGB_EECD_DO) data |= 1; ixgb_lower_clock(hw, &eecd_reg); @@ -300,7 +300,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) for(i = 0; i < 200; i++) { eecd_reg = IXGB_READ_REG(hw, EECD); - if(eecd_reg & IXGB_EECD_DO) + if (eecd_reg & IXGB_EECD_DO) return (true); udelay(50); @@ -331,7 +331,7 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) checksum += ixgb_read_eeprom(hw, i); - if(checksum == (u16) EEPROM_SUM) + if (checksum == (u16) EEPROM_SUM) return (true); else return (false); diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 8464d8a013b0..7c9b35c677f0 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->port = PORT_FIBRE; ecmd->transceiver = XCVR_EXTERNAL; - if(netif_carrier_ok(adapter->netdev)) { + if (netif_carrier_ok(adapter->netdev)) { ecmd->speed = SPEED_10000; ecmd->duplex = DUPLEX_FULL; } else { @@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct ixgb_adapter *adapter = netdev_priv(netdev); - if(ecmd->autoneg == AUTONEG_ENABLE || + if (ecmd->autoneg == AUTONEG_ENABLE || ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) return -EINVAL; - if(netif_running(adapter->netdev)) { + if 
(netif_running(adapter->netdev)) { ixgb_down(adapter, true); ixgb_reset(adapter); ixgb_up(adapter); @@ -146,11 +146,11 @@ ixgb_get_pauseparam(struct net_device *netdev, pause->autoneg = AUTONEG_DISABLE; - if(hw->fc.type == ixgb_fc_rx_pause) + if (hw->fc.type == ixgb_fc_rx_pause) pause->rx_pause = 1; - else if(hw->fc.type == ixgb_fc_tx_pause) + else if (hw->fc.type == ixgb_fc_tx_pause) pause->tx_pause = 1; - else if(hw->fc.type == ixgb_fc_full) { + else if (hw->fc.type == ixgb_fc_full) { pause->rx_pause = 1; pause->tx_pause = 1; } @@ -163,19 +163,19 @@ ixgb_set_pauseparam(struct net_device *netdev, struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - if(pause->autoneg == AUTONEG_ENABLE) + if (pause->autoneg == AUTONEG_ENABLE) return -EINVAL; - if(pause->rx_pause && pause->tx_pause) + if (pause->rx_pause && pause->tx_pause) hw->fc.type = ixgb_fc_full; - else if(pause->rx_pause && !pause->tx_pause) + else if (pause->rx_pause && !pause->tx_pause) hw->fc.type = ixgb_fc_rx_pause; - else if(!pause->rx_pause && pause->tx_pause) + else if (!pause->rx_pause && pause->tx_pause) hw->fc.type = ixgb_fc_tx_pause; - else if(!pause->rx_pause && !pause->tx_pause) + else if (!pause->rx_pause && !pause->tx_pause) hw->fc.type = ixgb_fc_none; - if(netif_running(adapter->netdev)) { + if (netif_running(adapter->netdev)) { ixgb_down(adapter, true); ixgb_up(adapter); ixgb_set_speed_duplex(netdev); @@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data) adapter->rx_csum = data; - if(netif_running(netdev)) { + if (netif_running(netdev)) { ixgb_down(adapter, true); ixgb_up(adapter); ixgb_set_speed_duplex(netdev); @@ -229,7 +229,7 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data) static int ixgb_set_tso(struct net_device *netdev, u32 data) { - if(data) + if (data) netdev->features |= NETIF_F_TSO; else netdev->features &= ~NETIF_F_TSO; @@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev, int i, max_len, first_word, last_word; int ret_val = 0; - if(eeprom->len == 0) { + if (eeprom->len == 0) { ret_val = -EINVAL; goto geeprom_error; } @@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev, max_len = ixgb_get_eeprom_len(netdev); - if(eeprom->offset > eeprom->offset + eeprom->len) { + if (eeprom->offset > eeprom->offset + eeprom->len) { ret_val = -EINVAL; goto geeprom_error; } - if((eeprom->offset + eeprom->len) > max_len) + if ((eeprom->offset + eeprom->len) > max_len) eeprom->len = (max_len - eeprom->offset); first_word = eeprom->offset >> 1; @@ -437,7 +437,7 @@ ixgb_get_eeprom(struct net_device *netdev, eeprom_buff = kmalloc(sizeof(__le16) * (last_word - first_word + 1), GFP_KERNEL); - if(!eeprom_buff) + if (!eeprom_buff) return -ENOMEM; /* note the eeprom was good because the driver loaded */ @@ -464,35 +464,35 @@ ixgb_set_eeprom(struct net_device *netdev, int max_len, first_word, last_word; u16 i; - if(eeprom->len == 0) + if (eeprom->len == 0) return -EINVAL; - if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EFAULT; max_len = ixgb_get_eeprom_len(netdev); - if(eeprom->offset > eeprom->offset + eeprom->len) + if (eeprom->offset > eeprom->offset + eeprom->len) return -EINVAL; - if((eeprom->offset + eeprom->len) > max_len) + if ((eeprom->offset + eeprom->len) > max_len) eeprom->len = (max_len - eeprom->offset); first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if(!eeprom_buff) + 
if (!eeprom_buff) return -ENOMEM; ptr = (void *)eeprom_buff; - if(eeprom->offset & 1) { + if (eeprom->offset & 1) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); ptr++; } - if((eeprom->offset + eeprom->len) & 1) { + if ((eeprom->offset + eeprom->len) & 1) { /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ eeprom_buff[last_word - first_word] @@ -504,7 +504,7 @@ ixgb_set_eeprom(struct net_device *netdev, ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); /* Update the checksum over the first part of the EEPROM if needed */ - if(first_word <= EEPROM_CHECKSUM_REG) + if (first_word <= EEPROM_CHECKSUM_REG) ixgb_update_eeprom_checksum(hw); kfree(eeprom_buff); @@ -557,10 +557,10 @@ ixgb_set_ringparam(struct net_device *netdev, tx_old = adapter->tx_ring; rx_old = adapter->rx_ring; - if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if(netif_running(adapter->netdev)) + if (netif_running(adapter->netdev)) ixgb_down(adapter, true); rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); @@ -571,11 +571,11 @@ ixgb_set_ringparam(struct net_device *netdev, txdr->count = min(txdr->count,(u32)MAX_TXD); txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); - if(netif_running(adapter->netdev)) { + if (netif_running(adapter->netdev)) { /* Try to get new resources before deleting old */ - if((err = ixgb_setup_rx_resources(adapter))) + if ((err = ixgb_setup_rx_resources(adapter))) goto err_setup_rx; - if((err = ixgb_setup_tx_resources(adapter))) + if ((err = ixgb_setup_tx_resources(adapter))) goto err_setup_tx; /* save the new, restore the old in order to free it, @@ -589,7 +589,7 @@ ixgb_set_ringparam(struct net_device *netdev, ixgb_free_tx_resources(adapter); adapter->rx_ring = rx_new; adapter->tx_ring = tx_new; - if((err = ixgb_up(adapter))) + if ((err = ixgb_up(adapter))) return err; ixgb_set_speed_duplex(netdev); } @@ -615,7 +615,7 @@ ixgb_led_blink_callback(unsigned long data) { struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; - if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status)) + if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status)) ixgb_led_off(&adapter->hw); else ixgb_led_on(&adapter->hw); @@ -631,7 +631,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data) if (!data) data = INT_MAX; - if(!adapter->blink_timer.function) { + if (!adapter->blink_timer.function) { init_timer(&adapter->blink_timer); adapter->blink_timer.function = ixgb_led_blink_callback; adapter->blink_timer.data = (unsigned long)adapter; diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 04d2003e24e1..d023fb59bf15 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c @@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw) /* If we are stopped or resetting exit gracefully and wait to be * started again before accessing the hardware. 
*/ - if(hw->adapter_stopped) { + if (hw->adapter_stopped) { DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); return false; } @@ -482,7 +482,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw, /* Place this multicast address in the RAR if there is room, * * else put it in the MTA */ - if(rar_used_count < IXGB_RAR_ENTRIES) { + if (rar_used_count < IXGB_RAR_ENTRIES) { ixgb_rar_set(hw, mc_addr_list + (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)), @@ -719,9 +719,8 @@ ixgb_setup_fc(struct ixgb_hw *hw) /* Write the new settings */ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); - if (pap_reg != 0) { + if (pap_reg != 0) IXGB_WRITE_REG(hw, PAP, pap_reg); - } /* Set the flow control receive threshold registers. Normally, * these registers will be set to a default threshold that may be @@ -729,14 +728,14 @@ ixgb_setup_fc(struct ixgb_hw *hw) * ability to transmit pause frames in not enabled, then these * registers will be set to 0. */ - if(!(hw->fc.type & ixgb_fc_tx_pause)) { + if (!(hw->fc.type & ixgb_fc_tx_pause)) { IXGB_WRITE_REG(hw, FCRTL, 0); IXGB_WRITE_REG(hw, FCRTH, 0); } else { /* We need to set up the Receive Threshold high and low water * marks as well as (optionally) enabling the transmission of XON * frames. */ - if(hw->fc.send_xon) { + if (hw->fc.send_xon) { IXGB_WRITE_REG(hw, FCRTL, (hw->fc.low_water | IXGB_FCRTL_XONE)); } else { @@ -1007,7 +1006,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw) DEBUGFUNC("ixgb_clear_hw_cntrs"); /* if we are stopped or resetting exit gracefully */ - if(hw->adapter_stopped) { + if (hw->adapter_stopped) { DEBUGOUT("Exiting because the adapter is stopped!!!\n"); return; } diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index fc2cf0edb7e5..f7dda049dd86 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -250,7 +250,7 @@ ixgb_up(struct ixgb_adapter *adapter) return err; } - if((hw->max_frame_size != max_frame) || + if ((hw->max_frame_size != max_frame) || (hw->max_frame_size != (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { @@ -258,11 +258,11 @@ ixgb_up(struct ixgb_adapter *adapter) IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); - if(hw->max_frame_size > + if (hw->max_frame_size > IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); - if(!(ctrl0 & IXGB_CTRL0_JFE)) { + if (!(ctrl0 & IXGB_CTRL0_JFE)) { ctrl0 |= IXGB_CTRL0_JFE; IXGB_WRITE_REG(hw, CTRL0, ctrl0); } @@ -299,7 +299,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) if (adapter->have_msi) pci_disable_msi(adapter->pdev); - if(kill_watchdog) + if (kill_watchdog) del_timer_sync(&adapter->watchdog_timer); adapter->link_speed = 0; @@ -356,14 +356,14 @@ ixgb_probe(struct pci_dev *pdev, int i; int err; - if((err = pci_enable_device(pdev))) + if ((err = pci_enable_device(pdev))) return err; - if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && + if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { pci_using_dac = 1; } else { - if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || + if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { printk(KERN_ERR "ixgb: No usable DMA configuration, aborting\n"); @@ -372,13 +372,13 @@ ixgb_probe(struct pci_dev *pdev, pci_using_dac = 0; } - if((err = pci_request_regions(pdev, ixgb_driver_name))) + if ((err = pci_request_regions(pdev, ixgb_driver_name))) goto err_request_regions; pci_set_master(pdev); netdev = 
alloc_etherdev(sizeof(struct ixgb_adapter)); - if(!netdev) { + if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } @@ -400,9 +400,9 @@ ixgb_probe(struct pci_dev *pdev, } for(i = BAR_1; i <= BAR_5; i++) { - if(pci_resource_len(pdev, i) == 0) + if (pci_resource_len(pdev, i) == 0) continue; - if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { adapter->hw.io_base = pci_resource_start(pdev, i); break; } @@ -436,7 +436,7 @@ ixgb_probe(struct pci_dev *pdev, /* setup the private structure */ - if((err = ixgb_sw_init(adapter))) + if ((err = ixgb_sw_init(adapter))) goto err_sw_init; netdev->features = NETIF_F_SG | @@ -446,12 +446,12 @@ ixgb_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_TSO; - if(pci_using_dac) + if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; /* make sure the EEPROM is good */ - if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { + if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; @@ -460,7 +460,7 @@ ixgb_probe(struct pci_dev *pdev, ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - if(!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->perm_addr)) { DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; @@ -475,7 +475,7 @@ ixgb_probe(struct pci_dev *pdev, INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); strcpy(netdev->name, "eth%d"); - if((err = register_netdev(netdev))) + if ((err = register_netdev(netdev))) goto err_register; /* we're going to reset, so assume we have no link for now */ @@ -558,7 +558,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter) hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ - if((hw->device_id == IXGB_DEVICE_ID_82597EX) + if ((hw->device_id == IXGB_DEVICE_ID_82597EX) || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) @@ -596,15 +596,15 @@ ixgb_open(struct net_device *netdev) /* allocate transmit descriptors */ - if((err = ixgb_setup_tx_resources(adapter))) + if ((err = ixgb_setup_tx_resources(adapter))) goto err_setup_tx; /* allocate receive descriptors */ - if((err = ixgb_setup_rx_resources(adapter))) + if ((err = ixgb_setup_rx_resources(adapter))) goto err_setup_rx; - if((err = ixgb_up(adapter))) + if ((err = ixgb_up(adapter))) goto err_up; return 0; @@ -660,7 +660,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) size = sizeof(struct ixgb_buffer) * txdr->count; txdr->buffer_info = vmalloc(size); - if(!txdr->buffer_info) { + if (!txdr->buffer_info) { DPRINTK(PROBE, ERR, "Unable to allocate transmit descriptor ring memory\n"); return -ENOMEM; @@ -673,7 +673,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) txdr->size = ALIGN(txdr->size, 4096); txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); - if(!txdr->desc) { + if (!txdr->desc) { vfree(txdr->buffer_info); DPRINTK(PROBE, ERR, "Unable to allocate transmit descriptor memory\n"); @@ -749,7 +749,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) size = sizeof(struct ixgb_buffer) * rxdr->count; rxdr->buffer_info = vmalloc(size); - if(!rxdr->buffer_info) { + if (!rxdr->buffer_info) { DPRINTK(PROBE, ERR, "Unable to allocate receive descriptor ring\n"); return -ENOMEM; @@ -763,7 
+763,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); - if(!rxdr->desc) { + if (!rxdr->desc) { vfree(rxdr->buffer_info); DPRINTK(PROBE, ERR, "Unable to allocate receive descriptors\n"); @@ -984,7 +984,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter) for(i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; - if(buffer_info->skb) { + if (buffer_info->skb) { pci_unmap_single(pdev, buffer_info->dma, @@ -1025,7 +1025,7 @@ ixgb_set_mac(struct net_device *netdev, void *p) struct ixgb_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; - if(!is_valid_ether_addr(addr->sa_data)) + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); @@ -1058,16 +1058,16 @@ ixgb_set_multi(struct net_device *netdev) rctl = IXGB_READ_REG(hw, RCTL); - if(netdev->flags & IFF_PROMISC) { + if (netdev->flags & IFF_PROMISC) { rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); - } else if(netdev->flags & IFF_ALLMULTI) { + } else if (netdev->flags & IFF_ALLMULTI) { rctl |= IXGB_RCTL_MPE; rctl &= ~IXGB_RCTL_UPE; } else { rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); } - if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { + if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { rctl |= IXGB_RCTL_MPE; IXGB_WRITE_REG(hw, RCTL, rctl); } else { @@ -1104,8 +1104,8 @@ ixgb_watchdog(unsigned long data) netif_stop_queue(netdev); } - if(adapter->hw.link_up) { - if(!netif_carrier_ok(netdev)) { + if (adapter->hw.link_up) { + if (!netif_carrier_ok(netdev)) { DPRINTK(LINK, INFO, "NIC Link is Up 10000 Mbps Full Duplex\n"); adapter->link_speed = 10000; @@ -1114,7 +1114,7 @@ ixgb_watchdog(unsigned long data) netif_wake_queue(netdev); } } else { - if(netif_carrier_ok(netdev)) { + if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; DPRINTK(LINK, INFO, "NIC Link is Down\n"); @@ -1126,8 +1126,8 @@ ixgb_watchdog(unsigned long data) ixgb_update_stats(adapter); - if(!netif_carrier_ok(netdev)) { - if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { + if (!netif_carrier_ok(netdev)) { + if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. 
@@ -1207,7 +1207,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | (skb->len - (hdr_len))); - if(++i == adapter->tx_ring.count) i = 0; + if (++i == adapter->tx_ring.count) i = 0; adapter->tx_ring.next_to_use = i; return 1; @@ -1223,7 +1223,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) unsigned int i; u8 css, cso; - if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { struct ixgb_buffer *buffer_info; css = skb_transport_offset(skb); cso = css + skb->csum_offset; @@ -1245,7 +1245,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) cpu_to_le32(IXGB_CONTEXT_DESC_TYPE | IXGB_TX_DESC_CMD_IDE); - if(++i == adapter->tx_ring.count) i = 0; + if (++i == adapter->tx_ring.count) i = 0; adapter->tx_ring.next_to_use = i; return true; @@ -1295,7 +1295,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, len -= size; offset += size; count++; - if(++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) i = 0; } for(f = 0; f < nr_frags; f++) { @@ -1328,7 +1328,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, len -= size; offset += size; count++; - if(++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) i = 0; } } i = (i == 0) ? tx_ring->count - 1 : i - 1; @@ -1349,17 +1349,16 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) u8 popts = 0; unsigned int i; - if(tx_flags & IXGB_TX_FLAGS_TSO) { + if (tx_flags & IXGB_TX_FLAGS_TSO) { cmd_type_len |= IXGB_TX_DESC_CMD_TSE; popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); } - if(tx_flags & IXGB_TX_FLAGS_CSUM) + if (tx_flags & IXGB_TX_FLAGS_CSUM) popts |= IXGB_TX_DESC_POPTS_TXSM; - if(tx_flags & IXGB_TX_FLAGS_VLAN) { + if (tx_flags & IXGB_TX_FLAGS_VLAN) cmd_type_len |= IXGB_TX_DESC_CMD_VLE; - } i = tx_ring->next_to_use; @@ -1373,7 +1372,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) tx_desc->popts = popts; tx_desc->vlan = cpu_to_le16(vlan_id); - if(++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) i = 0; } tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP @@ -1441,7 +1440,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - if(skb->len <= 0) { + if (skb->len <= 0) { dev_kfree_skb_any(skb); return 0; } @@ -1450,7 +1449,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) DESC_NEEDED))) return NETDEV_TX_BUSY; - if(adapter->vlgrp && vlan_tx_tag_present(skb)) { + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { tx_flags |= IXGB_TX_FLAGS_VLAN; vlan_id = vlan_tx_tag_get(skb); } @@ -1465,7 +1464,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (likely(tso)) tx_flags |= IXGB_TX_FLAGS_TSO; - else if(ixgb_tx_csum(adapter, skb)) + else if (ixgb_tx_csum(adapter, skb)) tx_flags |= IXGB_TX_FLAGS_CSUM; ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id, @@ -1573,7 +1572,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter) if (pci_channel_offline(pdev)) return; - if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || + if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); @@ -1582,7 +1581,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter) multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); /* fix up multicast stats by removing broadcasts */ - if(multi 
>= bcast) + if (multi >= bcast) multi -= bcast; adapter->stats.mprcl += (multi & 0xFFFFFFFF); @@ -1706,7 +1705,7 @@ ixgb_intr(int irq, void *data) unsigned int i; #endif - if(unlikely(!icr)) + if (unlikely(!icr)) return IRQ_NONE; /* Not our interrupt */ if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) @@ -1729,7 +1728,7 @@ ixgb_intr(int irq, void *data) * transmit queues for completed descriptors, intended to * avoid starvation issues and assist tx/rx fairness. */ for(i = 0; i < IXGB_MAX_INTR; i++) - if(!ixgb_clean_rx_irq(adapter) & + if (!ixgb_clean_rx_irq(adapter) & !ixgb_clean_tx_irq(adapter)) break; #endif @@ -1798,7 +1797,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) *(u32 *)&(tx_desc->status) = 0; cleaned = (i == eop); - if(++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) i = 0; } eop = tx_ring->buffer_info[i].next_to_watch; @@ -1820,7 +1819,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) } } - if(adapter->detect_tx_hung) { + if (adapter->detect_tx_hung) { /* detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ adapter->detect_tx_hung = false; @@ -1869,7 +1868,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, /* Ignore Checksum bit is set OR * TCP Checksum has not been calculated */ - if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || + if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { skb->ip_summed = CHECKSUM_NONE; return; @@ -1877,7 +1876,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, /* At this point we know the hardware did the TCP checksum */ /* now look at the TCP checksum error bit */ - if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { + if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { /* let the stack verify checksum errors */ skb->ip_summed = CHECKSUM_NONE; adapter->hw_csum_rx_error++; @@ -1918,7 +1917,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) u8 status; #ifdef CONFIG_IXGB_NAPI - if(*work_done >= work_to_do) + if (*work_done >= work_to_do) break; (*work_done)++; @@ -1929,11 +1928,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) prefetch(skb->data); - if(++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) i = 0; next_rxd = IXGB_RX_DESC(*rx_ring, i); prefetch(next_rxd); - if((j = i + 1) == rx_ring->count) j = 0; + if ((j = i + 1) == rx_ring->count) j = 0; next2_buffer = &rx_ring->buffer_info[j]; prefetch(next2_buffer); @@ -1950,7 +1949,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) length = le16_to_cpu(rx_desc->length); - if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { + if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { /* All receives must fit into a single buffer */ @@ -1999,14 +1998,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) skb->protocol = eth_type_trans(skb, netdev); #ifdef CONFIG_IXGB_NAPI - if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { + if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { vlan_hwaccel_receive_skb(skb, adapter->vlgrp, le16_to_cpu(rx_desc->special)); } else { netif_receive_skb(skb); } #else /* CONFIG_IXGB_NAPI */ - if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { + if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { vlan_hwaccel_rx(skb, adapter->vlgrp, le16_to_cpu(rx_desc->special)); } else { @@ -2092,7 +2091,7 @@ map_skb: rx_desc->status = 0; - if(++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) i = 0; buffer_info = &rx_ring->buffer_info[i]; } @@ -2125,7 +2124,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, 
struct vlan_group *grp) ixgb_irq_disable(adapter); adapter->vlgrp = grp; - if(grp) { + if (grp) { /* enable VLAN tag insert/strip */ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); ctrl |= IXGB_CTRL0_VME; @@ -2197,10 +2196,10 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) { ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); - if(adapter->vlgrp) { + if (adapter->vlgrp) { u16 vid; for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { - if(!vlan_group_get_device(adapter->vlgrp, vid)) + if (!vlan_group_get_device(adapter->vlgrp, vid)) continue; ixgb_vlan_rx_add_vid(adapter->netdev, vid); } @@ -2238,7 +2237,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); - if(netif_running(netdev)) + if (netif_running(netdev)) ixgb_down(adapter, true); pci_disable_device(pdev); @@ -2261,7 +2260,7 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); - if(pci_enable_device(pdev)) { + if (pci_enable_device(pdev)) { DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } @@ -2277,14 +2276,14 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) ixgb_reset(adapter); /* Make sure the EEPROM is good */ - if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { + if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n"); return PCI_ERS_RESULT_DISCONNECT; } ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - if(!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->perm_addr)) { DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n"); return PCI_ERS_RESULT_DISCONNECT; } @@ -2307,8 +2306,8 @@ static void ixgb_io_resume (struct pci_dev *pdev) pci_set_master(pdev); - if(netif_running(netdev)) { - if(ixgb_up(adapter)) { + if (netif_running(netdev)) { + if (ixgb_up(adapter)) { printk ("ixgb: can't bring device back up after reset\n"); return; } diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h index 4be1b273e1b8..fb74bb122e69 100644 --- a/drivers/net/ixgb/ixgb_osdep.h +++ b/drivers/net/ixgb/ixgb_osdep.h @@ -40,7 +40,7 @@ #include #undef ASSERT -#define ASSERT(x) if(!(x)) BUG() +#define ASSERT(x) if (!(x)) BUG() #define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) #ifdef DBG diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 865d14d6e5a7..a23d2ffc4b7c 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c @@ -200,7 +200,7 @@ struct ixgb_option { static int __devinit ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) { - if(*value == OPTION_UNSET) { + if (*value == OPTION_UNSET) { *value = opt->def; return 0; } @@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) } break; case range_option: - if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { printk(KERN_INFO "%s set to %i\n", opt->name, *value); return 0; } @@ -228,8 +228,8 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) for(i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; - if(*value == ent->i) { - if(ent->str[0] != '\0') + if (*value == ent->i) { + if (ent->str[0] != '\0') 
printk(KERN_INFO "%s\n", ent->str); return 0; } @@ -260,7 +260,7 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter) { int bd = adapter->bd_number; - if(bd >= IXGB_MAX_NIC) { + if (bd >= IXGB_MAX_NIC) { printk(KERN_NOTICE "Warning: no configuration for board #%i\n", bd); printk(KERN_NOTICE "Using defaults for all values\n"); @@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) }; struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; - if(num_TxDescriptors > bd) { + if (num_TxDescriptors > bd) { tx_ring->count = TxDescriptors[bd]; ixgb_validate_option(&tx_ring->count, &opt); } else { @@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) }; struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; - if(num_RxDescriptors > bd) { + if (num_RxDescriptors > bd) { rx_ring->count = RxDescriptors[bd]; ixgb_validate_option(&rx_ring->count, &opt); } else { @@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .def = OPTION_ENABLED }; - if(num_XsumRX > bd) { + if (num_XsumRX > bd) { unsigned int rx_csum = XsumRX[bd]; ixgb_validate_option(&rx_csum, &opt); adapter->rx_csum = rx_csum; @@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .p = fc_list }} }; - if(num_FlowControl > bd) { + if (num_FlowControl > bd) { unsigned int fc = FlowControl[bd]; ixgb_validate_option(&fc, &opt); adapter->hw.fc.type = fc; @@ -356,7 +356,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .max = MAX_FCRTH}} }; - if(num_RxFCHighThresh > bd) { + if (num_RxFCHighThresh > bd) { adapter->hw.fc.high_water = RxFCHighThresh[bd]; ixgb_validate_option(&adapter->hw.fc.high_water, &opt); } else { @@ -376,7 +376,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .max = MAX_FCRTL}} }; - if(num_RxFCLowThresh > bd) { + if (num_RxFCLowThresh > bd) { adapter->hw.fc.low_water = RxFCLowThresh[bd]; ixgb_validate_option(&adapter->hw.fc.low_water, &opt); } else { @@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .max = MAX_FCPAUSE}} }; - if(num_FCReqTimeout > bd) { + if (num_FCReqTimeout > bd) { unsigned int pause_time = FCReqTimeout[bd]; ixgb_validate_option(&pause_time, &opt); adapter->hw.fc.pause_time = pause_time; @@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .max = MAX_RDTR}} }; - if(num_RxIntDelay > bd) { + if (num_RxIntDelay > bd) { adapter->rx_int_delay = RxIntDelay[bd]; ixgb_validate_option(&adapter->rx_int_delay, &opt); } else { @@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .max = MAX_TIDV}} }; - if(num_TxIntDelay > bd) { + if (num_TxIntDelay > bd) { adapter->tx_int_delay = TxIntDelay[bd]; ixgb_validate_option(&adapter->tx_int_delay, &opt); } else { @@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) .def = OPTION_ENABLED }; - if(num_IntDelayEnable > bd) { + if (num_IntDelayEnable > bd) { unsigned int ide = IntDelayEnable[bd]; ixgb_validate_option(&ide, &opt); adapter->tx_int_delay_enable = ide; -- cgit v1.2.1 From 9a432992870b4a528bf36dd0327a45c23ddb6f94 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:18 -0700 Subject: ixgb: cleanup space after while Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_ee.c | 2 +- drivers/net/ixgb/ixgb_main.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index ba41d2a54c81..8bc9716b6ce0 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ 
b/drivers/net/ixgb/ixgb_ee.c @@ -120,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, mask = mask >> 1; - } while(mask); + } while (mask); /* We leave the "DI" bit set to "0" when we leave this routine. */ eecd_reg &= ~IXGB_EECD_DI; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index f7dda049dd86..dffb853111b7 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1274,7 +1274,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, i = tx_ring->next_to_use; - while(len) { + while (len) { buffer_info = &tx_ring->buffer_info[i]; size = min(len, IXGB_MAX_DATA_PER_TXD); /* Workaround for premature desc write-backs @@ -1305,7 +1305,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, len = frag->size; offset = 0; - while(len) { + while (len) { buffer_info = &tx_ring->buffer_info[i]; size = min(len, IXGB_MAX_DATA_PER_TXD); @@ -1362,7 +1362,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) i = tx_ring->next_to_use; - while(count--) { + while (count--) { buffer_info = &tx_ring->buffer_info[i]; tx_desc = IXGB_TX_DESC(*tx_ring, i); tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); @@ -1781,7 +1781,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = IXGB_TX_DESC(*tx_ring, eop); - while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) { + while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { for (cleaned = false; !cleaned; ) { tx_desc = IXGB_TX_DESC(*tx_ring, i); @@ -1912,7 +1912,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) rx_desc = IXGB_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; - while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) { + while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) { struct sk_buff *skb, *next_skb; u8 status; @@ -2053,7 +2053,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) /* leave three descriptors unused */ - while(--cleancount > 2) { + while (--cleancount > 2) { /* recycle! 
its good for you */ skb = buffer_info->skb; if (skb) { -- cgit v1.2.1 From 0060c07230ee6a5b070388ae55855c594a3d9132 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:23 -0700 Subject: ixgb: whitespace fixups Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb.h | 2 +- drivers/net/ixgb/ixgb_ethtool.c | 36 +++++++++---------- drivers/net/ixgb/ixgb_ids.h | 8 ++--- drivers/net/ixgb/ixgb_main.c | 78 ++++++++++++++++++++--------------------- drivers/net/ixgb/ixgb_param.c | 4 +-- 5 files changed, 63 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 3cec7b98d52f..69529ec9dfe1 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -94,7 +94,7 @@ struct ixgb_adapter; #define MIN_TXD 64 /* hardware cannot reliably support more than 512 descriptors owned by - * hardware descrioptor cache otherwise an unreliable ring under heavy + * hardware descrioptor cache otherwise an unreliable ring under heavy * recieve load may result */ /* #define DEFAULT_RXD 1024 */ /* #define MAX_RXD 4096 */ diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 7c9b35c677f0..5b354e3122c4 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -125,7 +125,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) if (ecmd->autoneg == AUTONEG_ENABLE || ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) return -EINVAL; - + if (netif_running(adapter->netdev)) { ixgb_down(adapter, true); ixgb_reset(adapter); @@ -143,9 +143,9 @@ ixgb_get_pauseparam(struct net_device *netdev, { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - + pause->autoneg = AUTONEG_DISABLE; - + if (hw->fc.type == ixgb_fc_rx_pause) pause->rx_pause = 1; else if (hw->fc.type == ixgb_fc_tx_pause) @@ -162,7 +162,7 @@ ixgb_set_pauseparam(struct net_device *netdev, { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - + if (pause->autoneg == AUTONEG_ENABLE) return -EINVAL; @@ -181,7 +181,7 @@ ixgb_set_pauseparam(struct net_device *netdev, ixgb_set_speed_duplex(netdev); } else ixgb_reset(adapter); - + return 0; } @@ -208,7 +208,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data) ixgb_reset(adapter); return 0; } - + static u32 ixgb_get_tx_csum(struct net_device *netdev) { @@ -234,7 +234,7 @@ ixgb_set_tso(struct net_device *netdev, u32 data) else netdev->features &= ~NETIF_F_TSO; return 0; -} +} static u32 ixgb_get_msglevel(struct net_device *netdev) @@ -251,7 +251,7 @@ ixgb_set_msglevel(struct net_device *netdev, u32 data) } #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ -static int +static int ixgb_get_regs_len(struct net_device *netdev) { #define IXGB_REG_DUMP_LEN 136*sizeof(u32) @@ -495,7 +495,7 @@ ixgb_set_eeprom(struct net_device *netdev, if ((eeprom->offset + eeprom->len) & 1) { /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ - eeprom_buff[last_word - first_word] + eeprom_buff[last_word - first_word] = ixgb_read_eeprom(hw, last_word); } @@ -534,7 +534,7 @@ ixgb_get_ringparam(struct net_device *netdev, struct ixgb_desc_ring *txdr = &adapter->tx_ring; struct ixgb_desc_ring *rxdr = &adapter->rx_ring; - ring->rx_max_pending = MAX_RXD; + ring->rx_max_pending = MAX_RXD; ring->tx_max_pending = MAX_TXD; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; @@ 
-544,7 +544,7 @@ ixgb_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } -static int +static int ixgb_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { @@ -647,7 +647,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data) return 0; } -static int +static int ixgb_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { @@ -658,8 +658,8 @@ ixgb_get_sset_count(struct net_device *netdev, int sset) } } -static void -ixgb_get_ethtool_stats(struct net_device *netdev, +static void +ixgb_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -667,13 +667,13 @@ ixgb_get_ethtool_stats(struct net_device *netdev, ixgb_update_stats(adapter); for(i = 0; i < IXGB_STATS_LEN; i++) { - char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; - data[i] = (ixgb_gstrings_stats[i].sizeof_stat == + char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; + data[i] = (ixgb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } -static void +static void ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { int i; @@ -681,7 +681,7 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch(stringset) { case ETH_SS_STATS: for(i=0; i < IXGB_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, + memcpy(data + i * ETH_GSTRING_LEN, ixgb_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); } diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h index 180d20e793a5..4ba4d1910eb4 100644 --- a/drivers/net/ixgb/ixgb_ids.h +++ b/drivers/net/ixgb/ixgb_ids.h @@ -38,11 +38,11 @@ #define SUN_VENDOR_ID 0x108E #define SUN_SUBVENDOR_ID 0x108E -#define IXGB_DEVICE_ID_82597EX 0x1048 -#define IXGB_DEVICE_ID_82597EX_SR 0x1A48 +#define IXGB_DEVICE_ID_82597EX 0x1048 +#define IXGB_DEVICE_ID_82597EX_SR 0x1A48 #define IXGB_DEVICE_ID_82597EX_LR 0x1B48 -#define IXGB_SUBDEVICE_ID_A11F 0xA11F -#define IXGB_SUBDEVICE_ID_A01F 0xA01F +#define IXGB_SUBDEVICE_ID_A11F 0xA11F +#define IXGB_SUBDEVICE_ID_A01F 0xA01F #define IXGB_DEVICE_ID_82597EX_CX4 0x109E #define IXGB_SUBDEVICE_ID_A00C 0xA00C diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index dffb853111b7..fc211a3f7c8c 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -346,8 +346,7 @@ ixgb_reset(struct ixgb_adapter *adapter) **/ static int __devinit -ixgb_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct ixgb_adapter *adapter; @@ -562,7 +561,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter) || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) - hw->mac_type = ixgb_82597; + hw->mac_type = ixgb_82597; else { /* should never have loaded on this device */ DPRINTK(PROBE, ERR, "unsupported device id\n"); @@ -702,8 +701,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter) u32 tctl; struct ixgb_hw *hw = &adapter->hw; - /* Setup the Base and Length of the Tx Descriptor Ring - * tx_ring.dma can be either a 32 or 64 bit value + /* Setup the Base and Length of the Tx Descriptor Ring + * tx_ring.dma can be either a 32 or 64 bit value */ IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); @@ -729,8 +728,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter) /* Setup Transmit Descriptor Settings for this adapter */ 
adapter->tx_cmd_type = - IXGB_TX_DESC_TYPE - | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0); + IXGB_TX_DESC_TYPE | + (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0); } /** @@ -792,8 +791,8 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter) rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); rctl |= - IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | - IXGB_RCTL_RXEN | IXGB_RCTL_CFF | + IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | + IXGB_RCTL_RXEN | IXGB_RCTL_CFF | (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); rctl |= IXGB_RCTL_SECRC; @@ -890,7 +889,7 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter) static void ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, - struct ixgb_buffer *buffer_info) + struct ixgb_buffer *buffer_info) { struct pci_dev *pdev = adapter->pdev; @@ -1076,10 +1075,11 @@ ixgb_set_multi(struct net_device *netdev) IXGB_WRITE_REG(hw, RCTL, rctl); - for(i = 0, mc_ptr = netdev->mc_list; mc_ptr; - i++, mc_ptr = mc_ptr->next) + for (i = 0, mc_ptr = netdev->mc_list; + mc_ptr; + i++, mc_ptr = mc_ptr->next) memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS], - mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); + mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0); } @@ -1199,7 +1199,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) context_desc->hdr_len = hdr_len; context_desc->status = 0; context_desc->cmd_type_len = cpu_to_le32( - IXGB_CONTEXT_DESC_TYPE + IXGB_CONTEXT_DESC_TYPE | IXGB_CONTEXT_DESC_CMD_TSE | IXGB_CONTEXT_DESC_CMD_IP | IXGB_CONTEXT_DESC_CMD_TCP @@ -1375,8 +1375,8 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) if (++i == tx_ring->count) i = 0; } - tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP - | IXGB_TX_DESC_CMD_RS ); + tx_desc->cmd_type_len |= + cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS); /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only @@ -1455,7 +1455,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } first = adapter->tx_ring.next_to_use; - + tso = ixgb_tso(adapter, skb); if (tso < 0) { dev_kfree_skb_any(skb); @@ -1577,16 +1577,16 @@ ixgb_update_stats(struct ixgb_adapter *adapter) u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); - u64 bcast = ((u64)bcast_h << 32) | bcast_l; + u64 bcast = ((u64)bcast_h << 32) | bcast_l; multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); /* fix up multicast stats by removing broadcasts */ if (multi >= bcast) multi -= bcast; - + adapter->stats.mprcl += (multi & 0xFFFFFFFF); adapter->stats.mprch += (multi >> 32); - adapter->stats.bprcl += bcast_l; + adapter->stats.bprcl += bcast_l; adapter->stats.bprch += bcast_h; } else { adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); @@ -1715,7 +1715,7 @@ ixgb_intr(int irq, void *data) #ifdef CONFIG_IXGB_NAPI if (netif_rx_schedule_prep(netdev, &adapter->napi)) { - /* Disable interrupts and register for poll. The flush + /* Disable interrupts and register for poll. The flush of the posted write is intentionally left out. 
*/ @@ -1731,7 +1731,7 @@ ixgb_intr(int irq, void *data) if (!ixgb_clean_rx_irq(adapter) & !ixgb_clean_tx_irq(adapter)) break; -#endif +#endif return IRQ_HANDLED; } @@ -1787,9 +1787,9 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) tx_desc = IXGB_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; - if (tx_desc->popts - & (IXGB_TX_DESC_POPTS_TXSM | - IXGB_TX_DESC_POPTS_IXSM)) + if (tx_desc->popts & + (IXGB_TX_DESC_POPTS_TXSM | + IXGB_TX_DESC_POPTS_IXSM)) adapter->hw_csum_tx_good++; ixgb_unmap_and_free_tx_resource(adapter, buffer_info); @@ -1862,8 +1862,8 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) static void ixgb_rx_checksum(struct ixgb_adapter *adapter, - struct ixgb_rx_desc *rx_desc, - struct sk_buff *skb) + struct ixgb_rx_desc *rx_desc, + struct sk_buff *skb) { /* Ignore Checksum bit is set OR * TCP Checksum has not been calculated @@ -1960,11 +1960,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) goto rxdesc_done; } - if (unlikely(rx_desc->errors - & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE - | IXGB_RX_DESC_ERRORS_P | - IXGB_RX_DESC_ERRORS_RXE))) { - + if (unlikely(rx_desc->errors & + (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | + IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { dev_kfree_skb_irq(skb); goto rxdesc_done; } @@ -2000,14 +1998,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) #ifdef CONFIG_IXGB_NAPI if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { vlan_hwaccel_receive_skb(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->special)); + le16_to_cpu(rx_desc->special)); } else { netif_receive_skb(skb); } #else /* CONFIG_IXGB_NAPI */ if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { vlan_hwaccel_rx(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->special)); + le16_to_cpu(rx_desc->special)); } else { netif_rx(skb); } @@ -2086,7 +2084,7 @@ map_skb: rx_desc = IXGB_RX_DESC(*rx_ring, i); rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); /* guarantee DD bit not set now before h/w gets descriptor - * this is the rest of the workaround for h/w double + * this is the rest of the workaround for h/w double * writeback. */ rx_desc->status = 0; @@ -2111,7 +2109,7 @@ map_skb: /** * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping. - * + * * @param netdev network interface device structure * @param grp indicates to enable or disable tagging/stripping **/ @@ -2231,8 +2229,8 @@ static void ixgb_netpoll(struct net_device *dev) * This callback is called by the PCI subsystem whenever * a PCI bus error is detected. */ -static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, - enum pci_channel_state state) +static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -2255,7 +2253,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, * This is a shortened version of the device probe/discovery code, * it resembles the first-half of the ixgb_probe() routine. */ -static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) +static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -2299,7 +2297,7 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) * normal operation. Implementation resembles the second-half * of the ixgb_probe() routine. 
*/ -static void ixgb_io_resume (struct pci_dev *pdev) +static void ixgb_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index a23d2ffc4b7c..07a6980c7d8f 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c @@ -136,7 +136,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold"); /* Flow control request timeout (how long to pause the link partner's tx) * (PAP 15:0) * - * Valid Range: 1 - 65535 + * Valid Range: 1 - 65535 * * Default Value: 65535 (0xffff) (we'll send an xon if we recover) */ @@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) /* high must be greater than low */ if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { /* set defaults */ - printk (KERN_INFO + printk (KERN_INFO "RxFCHighThresh must be >= (RxFCLowThresh + 8), " "Using Defaults\n"); adapter->hw.fc.high_water = DEFAULT_FCRTH; -- cgit v1.2.1 From 52035bdbe8229c6bffae0be3444924ffbccf6506 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:28 -0700 Subject: ixgb: fix spelling errors Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb.h | 4 ++-- drivers/net/ixgb/ixgb_ee.c | 6 +++--- drivers/net/ixgb/ixgb_ee.h | 10 +++++----- drivers/net/ixgb/ixgb_hw.c | 4 ++-- drivers/net/ixgb/ixgb_main.c | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 69529ec9dfe1..6cb4dde07aaa 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -94,8 +94,8 @@ struct ixgb_adapter; #define MIN_TXD 64 /* hardware cannot reliably support more than 512 descriptors owned by - * hardware descrioptor cache otherwise an unreliable ring under heavy - * recieve load may result */ + * hardware descriptor cache otherwise an unreliable ring under heavy + * receive load may result */ /* #define DEFAULT_RXD 1024 */ /* #define MAX_RXD 4096 */ #define DEFAULT_RXD 512 diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 8bc9716b6ce0..aa71b9b1cc98 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -205,7 +205,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw) eecd_reg = IXGB_READ_REG(hw, EECD); - /* Deselct EEPROM */ + /* Deselect EEPROM */ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); IXGB_WRITE_REG(hw, EECD, eecd_reg); udelay(50); @@ -293,7 +293,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) */ ixgb_standby_eeprom(hw); - /* Now read DO repeatedly until is high (equal to '1'). The EEEPROM will + /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will * signal that the command has been completed by raising the DO signal. * If DO does not go high in 10 milliseconds, then error out. */ @@ -365,7 +365,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw) * * hw - Struct containing variables accessed by shared code * reg - offset within the EEPROM to be written to - * data - 16 bit word to be writen to the EEPROM + * data - 16 bit word to be written to the EEPROM * * If ixgb_update_eeprom_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. 
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h index 4b7bd0d4a8a9..12136b633497 100644 --- a/drivers/net/ixgb/ixgb_ee.h +++ b/drivers/net/ixgb/ixgb_ee.h @@ -34,11 +34,11 @@ #define IXGB_ETH_LENGTH_OF_ADDRESS 6 /* EEPROM Commands */ -#define EEPROM_READ_OPCODE 0x6 /* EERPOM read opcode */ -#define EEPROM_WRITE_OPCODE 0x5 /* EERPOM write opcode */ -#define EEPROM_ERASE_OPCODE 0x7 /* EERPOM erase opcode */ -#define EEPROM_EWEN_OPCODE 0x13 /* EERPOM erase/write enable */ -#define EEPROM_EWDS_OPCODE 0x10 /* EERPOM erast/write disable */ +#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */ +#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */ /* EEPROM MAP (Word Offsets) */ #define EEPROM_IA_1_2_REG 0x0000 diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index d023fb59bf15..3694e8c7b005 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c @@ -371,7 +371,7 @@ ixgb_init_hw(struct ixgb_hw *hw) * hw - Struct containing variables accessed by shared code * * Places the MAC address in receive address register 0 and clears the rest - * of the receive addresss registers. Clears the multicast table. Assumes + * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. *****************************************************************************/ static void @@ -964,7 +964,7 @@ ixgb_check_for_link(struct ixgb_hw *hw) } /****************************************************************************** - * Check for a bad link condition that may have occured. + * Check for a bad link condition that may have occurred. * The indication is that the RFC / LFC registers may be incrementing * continually. A full adapter reset is required to recover. * diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index fc211a3f7c8c..796f01109d74 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -2248,7 +2248,7 @@ static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, * ixgb_io_slot_reset - called after the pci bus has been reset. * @pdev pointer to pci device with error * - * This callback is called after the PCI buss has been reset. + * This callback is called after the PCI bus has been reset. * Basically, this tries to restart the card from scratch. * This is a shortened version of the device probe/discovery code, * it resembles the first-half of the ixgb_probe() routine. 
-- cgit v1.2.1 From 1459336da45b214a59f0825777549fb0cb60ed7d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:33 -0700 Subject: ixgb: trivial fix space after for Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_ee.c | 10 +++++----- drivers/net/ixgb/ixgb_ethtool.c | 14 ++++++-------- drivers/net/ixgb/ixgb_hw.c | 21 ++++++++++----------- drivers/net/ixgb/ixgb_main.c | 12 ++++++------ drivers/net/ixgb/ixgb_param.c | 2 +- 5 files changed, 28 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index aa71b9b1cc98..4a9e52f90711 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -152,7 +152,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw) eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); data = 0; - for(i = 0; i < 16; i++) { + for (i = 0; i < 16; i++) { data = data << 1; ixgb_raise_clock(hw, &eecd_reg); @@ -297,7 +297,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) * signal that the command has been completed by raising the DO signal. * If DO does not go high in 10 milliseconds, then error out. */ - for(i = 0; i < 200; i++) { + for (i = 0; i < 200; i++) { eecd_reg = IXGB_READ_REG(hw, EECD); if (eecd_reg & IXGB_EECD_DO) @@ -328,7 +328,7 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) u16 checksum = 0; u16 i; - for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) checksum += ixgb_read_eeprom(hw, i); if (checksum == (u16) EEPROM_SUM) @@ -351,7 +351,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw) u16 checksum = 0; u16 i; - for(i = 0; i < EEPROM_CHECKSUM_REG; i++) + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) checksum += ixgb_read_eeprom(hw, i); checksum = (u16) EEPROM_SUM - checksum; @@ -472,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw) ee_map = (struct ixgb_ee_map_type *)hw->eeprom; DEBUGOUT("ixgb_ee: Reading eeprom data\n"); - for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { + for (i = 0; i < IXGB_EEPROM_SIZE ; i++) { u16 ee_data; ee_data = ixgb_read_eeprom(hw, i); checksum += ee_data; diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 5b354e3122c4..e1836c16ab3d 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -301,7 +301,7 @@ ixgb_get_regs(struct net_device *netdev, *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ /* there are 16 RAR entries in hardware, we only use 3 */ - for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { + for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ } @@ -441,12 +441,10 @@ ixgb_get_eeprom(struct net_device *netdev, return -ENOMEM; /* note the eeprom was good because the driver loaded */ - for(i = 0; i <= (last_word - first_word); i++) { + for (i = 0; i <= (last_word - first_word); i++) eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); - } - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), - eeprom->len); + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); kfree(eeprom_buff); geeprom_error: @@ -500,7 +498,7 @@ ixgb_set_eeprom(struct net_device *netdev, } memcpy(ptr, bytes, eeprom->len); - for(i = 0; i <= (last_word - first_word); i++) + for (i = 0; i <= (last_word - first_word); i++) ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); /* Update the checksum over the first part of the EEPROM if needed */ @@ -666,7 +664,7 @@ 
ixgb_get_ethtool_stats(struct net_device *netdev, int i; ixgb_update_stats(adapter); - for(i = 0; i < IXGB_STATS_LEN; i++) { + for (i = 0; i < IXGB_STATS_LEN; i++) { char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; data[i] = (ixgb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; @@ -680,7 +678,7 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch(stringset) { case ETH_SS_STATS: - for(i=0; i < IXGB_STATS_LEN; i++) { + for (i = 0; i < IXGB_STATS_LEN; i++) { memcpy(data + i * ETH_GSTRING_LEN, ixgb_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 3694e8c7b005..9cc75ce9dc86 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c @@ -347,7 +347,7 @@ ixgb_init_hw(struct ixgb_hw *hw) /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); - for(i = 0; i < IXGB_MC_TBL_SIZE; i++) + for (i = 0; i < IXGB_MC_TBL_SIZE; i++) IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); /* Zero out the VLAN Filter Table Array */ @@ -413,7 +413,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw) /* Zero out the other 15 receive addresses. */ DEBUGOUT("Clearing RAR[1-15]\n"); - for(i = 1; i < IXGB_RAR_ENTRIES; i++) { + for (i = 1; i < IXGB_RAR_ENTRIES; i++) { /* Write high reg first to disable the AV bit first */ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); @@ -452,19 +452,18 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw, /* Clear RAR[1-15] */ DEBUGOUT(" Clearing RAR[1-15]\n"); - for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { + for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); } /* Clear the MTA */ DEBUGOUT(" Clearing MTA\n"); - for(i = 0; i < IXGB_MC_TBL_SIZE; i++) { + for (i = 0; i < IXGB_MC_TBL_SIZE; i++) IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); - } /* Add the new addresses */ - for(i = 0; i < mc_addr_count; i++) { + for (i = 0; i < mc_addr_count; i++) { DEBUGOUT(" Adding the multicast addresses:\n"); DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)], @@ -649,7 +648,7 @@ ixgb_clear_vfta(struct ixgb_hw *hw) { u32 offset; - for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) + for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); return; } @@ -790,7 +789,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw, ** from the CPU Write to the Ready bit assertion. **************************************************************/ - for(i = 0; i < 10; i++) + for (i = 0; i < 10; i++) { udelay(10); @@ -817,7 +816,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw, ** from the CPU Write to the Ready bit assertion. **************************************************************/ - for(i = 0; i < 10; i++) + for (i = 0; i < 10; i++) { udelay(10); @@ -886,7 +885,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw, ** from the CPU Write to the Ready bit assertion. **************************************************************/ - for(i = 0; i < 10; i++) + for (i = 0; i < 10; i++) { udelay(10); @@ -913,7 +912,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw, ** from the CPU Write to the Ready bit assertion. 
**************************************************************/ - for(i = 0; i < 10; i++) + for (i = 0; i < 10; i++) { udelay(10); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 796f01109d74..c1dde795a837 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -398,7 +398,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; } - for(i = BAR_1; i <= BAR_5; i++) { + for (i = BAR_1; i <= BAR_5; i++) { if (pci_resource_len(pdev, i) == 0) continue; if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { @@ -923,7 +923,7 @@ ixgb_clean_tx_ring(struct ixgb_adapter *adapter) /* Free all the Tx ring sk_buffs */ - for(i = 0; i < tx_ring->count; i++) { + for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; ixgb_unmap_and_free_tx_resource(adapter, buffer_info); } @@ -981,7 +981,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter) /* Free all the Rx ring sk_buffs */ - for(i = 0; i < rx_ring->count; i++) { + for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->skb) { @@ -1298,7 +1298,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, if (++i == tx_ring->count) i = 0; } - for(f = 0; f < nr_frags; f++) { + for (f = 0; f < nr_frags; f++) { struct skb_frag_struct *frag; frag = &skb_shinfo(skb)->frags[f]; @@ -1727,7 +1727,7 @@ ixgb_intr(int irq, void *data) * every pass through this for loop checks both receive and * transmit queues for completed descriptors, intended to * avoid starvation issues and assist tx/rx fairness. */ - for(i = 0; i < IXGB_MAX_INTR; i++) + for (i = 0; i < IXGB_MAX_INTR; i++) if (!ixgb_clean_rx_irq(adapter) & !ixgb_clean_tx_irq(adapter)) break; @@ -2196,7 +2196,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) if (adapter->vlgrp) { u16 vid; - for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { if (!vlan_group_get_device(adapter->vlgrp, vid)) continue; ixgb_vlan_rx_add_vid(adapter->netdev, vid); diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 07a6980c7d8f..107ef4826b5d 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c @@ -226,7 +226,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) int i; struct ixgb_opt_list *ent; - for(i = 0; i < opt->arg.l.nr; i++) { + for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') -- cgit v1.2.1 From 8441dab26c0fb3d9c8cb8e9d2114a8f266a0b299 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:38 -0700 Subject: ixgb: cleanup checkpatch suggestions that are relevant Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_param.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 107ef4826b5d..169636a5935d 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c @@ -363,7 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) adapter->hw.fc.high_water = opt.def; } if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) - printk (KERN_INFO + printk(KERN_INFO "Ignoring RxFCHighThresh when no RxFC\n"); } { /* Receive Flow Control Low Threshold */ @@ -383,7 +383,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) adapter->hw.fc.low_water = opt.def; } if (!(adapter->hw.fc.type & 
ixgb_fc_tx_pause) ) - printk (KERN_INFO + printk(KERN_INFO "Ignoring RxFCLowThresh when no RxFC\n"); } { /* Flow Control Pause Time Request*/ @@ -404,7 +404,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) adapter->hw.fc.pause_time = opt.def; } if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) - printk (KERN_INFO + printk(KERN_INFO "Ignoring FCReqTimeout when no RxFC\n"); } /* high low and spacing check for rx flow control thresholds */ @@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) /* high must be greater than low */ if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { /* set defaults */ - printk (KERN_INFO + printk(KERN_INFO "RxFCHighThresh must be >= (RxFCLowThresh + 8), " "Using Defaults\n"); adapter->hw.fc.high_water = DEFAULT_FCRTH; -- cgit v1.2.1 From fc2d14e36c69a8d44a2f5230835b54e95025363e Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:43 -0700 Subject: ixgb: rx cleanup performance improvements rx cleanup should look more like our other drivers that have evolved to nicer performance levels over time. Changes consist of refilling tx buffers to hardware more often, some minor assignment cleanups. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c1dde795a837..365212b9e952 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -98,7 +98,7 @@ static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); #else static bool ixgb_clean_rx_irq(struct ixgb_adapter *); #endif -static void ixgb_alloc_rx_buffers(struct ixgb_adapter *); +static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int); static void ixgb_tx_timeout(struct net_device *dev); static void ixgb_tx_timeout_task(struct work_struct *work); @@ -225,7 +225,7 @@ ixgb_up(struct ixgb_adapter *adapter) ixgb_configure_tx(adapter); ixgb_setup_rctl(adapter); ixgb_configure_rx(adapter); - ixgb_alloc_rx_buffers(adapter); + ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring)); /* disable interrupts and get the hardware into a known state */ IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); @@ -1906,6 +1906,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; u32 length; unsigned int i, j; + int cleaned_count = 0; bool cleaned = false; i = rx_ring->next_to_clean; @@ -1913,7 +1914,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) buffer_info = &rx_ring->buffer_info[i]; while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) { - struct sk_buff *skb, *next_skb; + struct sk_buff *skb; u8 status; #ifdef CONFIG_IXGB_NAPI @@ -1926,7 +1927,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) skb = buffer_info->skb; buffer_info->skb = NULL; - prefetch(skb->data); + prefetch(skb->data - NET_IP_ALIGN); if (++i == rx_ring->count) i = 0; next_rxd = IXGB_RX_DESC(*rx_ring, i); @@ -1937,17 +1938,18 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) prefetch(next2_buffer); next_buffer = &rx_ring->buffer_info[i]; - next_skb = next_buffer->skb; - prefetch(next_skb); cleaned = true; + cleaned_count++; pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; length = le16_to_cpu(rx_desc->length); + rx_desc->length = 0; if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { @@ -2016,6 +2018,12 @@ 
rxdesc_done: /* clean up descriptor, might be written over by hw */ rx_desc->status = 0; + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) { + ixgb_alloc_rx_buffers(adapter, cleaned_count); + cleaned_count = 0; + } + /* use prefetched values */ rx_desc = next_rxd; buffer_info = next_buffer; @@ -2023,7 +2031,9 @@ rxdesc_done: rx_ring->next_to_clean = i; - ixgb_alloc_rx_buffers(adapter); + cleaned_count = IXGB_DESC_UNUSED(rx_ring); + if (cleaned_count) + ixgb_alloc_rx_buffers(adapter, cleaned_count); return cleaned; } @@ -2034,7 +2044,7 @@ rxdesc_done: **/ static void -ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) +ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count) { struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; struct net_device *netdev = adapter->netdev; @@ -2051,7 +2061,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) /* leave three descriptors unused */ - while (--cleancount > 2) { + while (--cleancount > 2 && cleaned_count--) { /* recycle! its good for you */ skb = buffer_info->skb; if (skb) { -- cgit v1.2.1 From 9e7bd330702bd8d2a87bf8fee027e90c6c90a401 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:48 -0700 Subject: ixgb: clean up assignments inside if statements Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 365212b9e952..05b2677d6536 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -355,15 +355,16 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int i; int err; - if ((err = pci_enable_device(pdev))) + err = pci_enable_device(pdev); + if (err) return err; if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && - !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { + !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { pci_using_dac = 1; } else { if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || - (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { + (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { printk(KERN_ERR "ixgb: No usable DMA configuration, aborting\n"); goto err_dma_mask; @@ -371,7 +372,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_using_dac = 0; } - if ((err = pci_request_regions(pdev, ixgb_driver_name))) + err = pci_request_regions(pdev, ixgb_driver_name); + if (err) goto err_request_regions; pci_set_master(pdev); @@ -435,7 +437,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* setup the private structure */ - if ((err = ixgb_sw_init(adapter))) + err = ixgb_sw_init(adapter); + if (err) goto err_sw_init; netdev->features = NETIF_F_SG | @@ -474,7 +477,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); strcpy(netdev->name, "eth%d"); - if ((err = register_netdev(netdev))) + err = register_netdev(netdev); + if (err) goto err_register; /* we're going to reset, so assume we have no link for now */ @@ -594,16 +598,18 @@ ixgb_open(struct net_device *netdev) int err; /* allocate transmit descriptors */ - - if ((err = ixgb_setup_tx_resources(adapter))) + err = ixgb_setup_tx_resources(adapter); + if (err) goto err_setup_tx; /* allocate receive descriptors */ - if ((err = 
ixgb_setup_rx_resources(adapter))) + err = ixgb_setup_rx_resources(adapter); + if (err) goto err_setup_rx; - if ((err = ixgb_up(adapter))) + err = ixgb_up(adapter); + if (err) goto err_up; return 0; -- cgit v1.2.1 From c21993401479635025a8053aff1f5cfdbfee5fd9 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:53 -0700 Subject: ixgb: audit use of dev_kfree_skb_any calls to kfree_skb_any are only required when calling kfree from interrupt context. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 05b2677d6536..3e857c022b66 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -903,8 +903,10 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_TODEVICE); + /* okay to call kfree_skb here instead of kfree_skb_any because + * this is never called in interrupt context */ if (buffer_info->skb) - dev_kfree_skb_any(buffer_info->skb); + dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; buffer_info->dma = 0; @@ -1447,7 +1449,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } if (skb->len <= 0) { - dev_kfree_skb_any(skb); + dev_kfree_skb(skb); return 0; } @@ -1464,7 +1466,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tso = ixgb_tso(adapter, skb); if (tso < 0) { - dev_kfree_skb_any(skb); + dev_kfree_skb(skb); return NETDEV_TX_OK; } -- cgit v1.2.1 From 2c21fc6e3a1150a1fa8c09b0f8ae0b796fb5039f Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:52:58 -0700 Subject: ixgb: cleanup header cleaned up some spacing in defines Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/ixgb.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 6cb4dde07aaa..5e8a60570467 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -89,18 +89,16 @@ struct ixgb_adapter; /* TX/RX descriptor defines */ -#define DEFAULT_TXD 256 -#define MAX_TXD 4096 -#define MIN_TXD 64 +#define DEFAULT_TXD 256 +#define MAX_TXD 4096 +#define MIN_TXD 64 /* hardware cannot reliably support more than 512 descriptors owned by * hardware descriptor cache otherwise an unreliable ring under heavy * receive load may result */ -/* #define DEFAULT_RXD 1024 */ -/* #define MAX_RXD 4096 */ -#define DEFAULT_RXD 512 -#define MAX_RXD 512 -#define MIN_RXD 64 +#define DEFAULT_RXD 512 +#define MAX_RXD 512 +#define MIN_RXD 64 /* Supported Rx Buffer Sizes */ #define IXGB_RXBUFFER_2048 2048 -- cgit v1.2.1 From 6d37ab282e246f3cb5b4b975ecc5e8303ba5da82 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:53:04 -0700 Subject: ixgb: make NAPI the only option and the default network maintainers suggest NAPI only drivers are the only way to go. 
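For reference, a minimal sketch of the 2.6.26-era NAPI pattern the driver now uses unconditionally; the example_* names are placeholders and this is not code from the patch, which simply removes the #ifdef CONFIG_IXGB_NAPI branches and keeps ixgb_clean() registered through netif_napi_add() with a weight of 64, as the hunks below show:

static irqreturn_t example_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct example_adapter *adapter = netdev_priv(netdev);

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		/* mask chip interrupts, then let the poll routine do the work */
		example_irq_disable(adapter);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter =
		container_of(napi, struct example_adapter, napi);
	int work_done = 0;

	example_clean_tx_irq(adapter);
	example_clean_rx_irq(adapter, &work_done, budget);

	/* staying under budget means the ring is drained: leave polling
	 * mode and re-enable interrupts until the next burst arrives */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		example_irq_enable(adapter);
	}
	return work_done;
}

The interrupt handler only schedules the poll; all descriptor cleaning moves out of hard-IRQ context, which is why the non-NAPI loop over IXGB_MAX_INTR in ixgb_intr() can be deleted.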
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 14 ------------- drivers/net/ixgb/ixgb_main.c | 47 +------------------------------------------- 2 files changed, 1 insertion(+), 60 deletions(-) (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9b98714889bf..21414177ee1e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2430,20 +2430,6 @@ config IXGB To compile this driver as a module, choose M here. The module will be called ixgb. -config IXGB_NAPI - bool "Use Rx Polling (NAPI) (EXPERIMENTAL)" - depends on IXGB && EXPERIMENTAL - help - NAPI is a new driver API designed to reduce CPU and interrupt load - when the driver is receiving lots of packets from the card. It is - still somewhat experimental and thus not yet enabled by default. - - If your estimated Rx load is 10kpps or more, or if the card will be - deployed on potentially unfriendly networks (e.g. in a firewall), - then say Y here. - - If in doubt, say N. - config S2IO tristate "S2IO 10Gbe XFrame NIC" depends on PCI diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 3e857c022b66..526413482be2 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -31,12 +31,8 @@ char ixgb_driver_name[] = "ixgb"; static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; -#ifndef CONFIG_IXGB_NAPI -#define DRIVERNAPI -#else #define DRIVERNAPI "-NAPI" -#endif -#define DRV_VERSION "1.0.126-k4"DRIVERNAPI +#define DRV_VERSION "1.0.126" DRIVERNAPI const char ixgb_driver_version[] = DRV_VERSION; static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -92,12 +88,8 @@ static int ixgb_set_mac(struct net_device *netdev, void *p); static irqreturn_t ixgb_intr(int irq, void *data); static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); -#ifdef CONFIG_IXGB_NAPI static int ixgb_clean(struct napi_struct *, int); static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); -#else -static bool ixgb_clean_rx_irq(struct ixgb_adapter *); -#endif static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int); static void ixgb_tx_timeout(struct net_device *dev); @@ -271,9 +263,7 @@ ixgb_up(struct ixgb_adapter *adapter) clear_bit(__IXGB_DOWN, &adapter->flags); -#ifdef CONFIG_IXGB_NAPI napi_enable(&adapter->napi); -#endif ixgb_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies); @@ -289,9 +279,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) /* prevent the interrupt handler from restarting watchdog */ set_bit(__IXGB_DOWN, &adapter->flags); -#ifdef CONFIG_IXGB_NAPI napi_disable(&adapter->napi); -#endif /* waiting for NAPI to complete can re-enable interrupts */ ixgb_irq_disable(adapter); free_irq(adapter->pdev->irq, netdev); @@ -419,9 +407,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgb_set_ethtool_ops(netdev); netdev->tx_timeout = &ixgb_tx_timeout; netdev->watchdog_timeo = 5 * HZ; -#ifdef CONFIG_IXGB_NAPI netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); -#endif netdev->vlan_rx_register = ixgb_vlan_rx_register; netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid; netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid; @@ -1709,9 +1695,6 @@ ixgb_intr(int irq, void *data) struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; u32 icr = IXGB_READ_REG(hw, ICR); -#ifndef CONFIG_IXGB_NAPI - unsigned int i; -#endif if (unlikely(!icr)) return IRQ_NONE; /* Not our interrupt */ @@ 
-1720,7 +1703,6 @@ ixgb_intr(int irq, void *data) if (!test_bit(__IXGB_DOWN, &adapter->flags)) mod_timer(&adapter->watchdog_timer, jiffies); -#ifdef CONFIG_IXGB_NAPI if (netif_rx_schedule_prep(netdev, &adapter->napi)) { /* Disable interrupts and register for poll. The flush @@ -1730,20 +1712,9 @@ ixgb_intr(int irq, void *data) IXGB_WRITE_REG(&adapter->hw, IMC, ~0); __netif_rx_schedule(netdev, &adapter->napi); } -#else - /* yes, that is actually a & and it is meant to make sure that - * every pass through this for loop checks both receive and - * transmit queues for completed descriptors, intended to - * avoid starvation issues and assist tx/rx fairness. */ - for (i = 0; i < IXGB_MAX_INTR; i++) - if (!ixgb_clean_rx_irq(adapter) & - !ixgb_clean_tx_irq(adapter)) - break; -#endif return IRQ_HANDLED; } -#ifdef CONFIG_IXGB_NAPI /** * ixgb_clean - NAPI Rx polling callback * @adapter: board private structure @@ -1768,7 +1739,6 @@ ixgb_clean(struct napi_struct *napi, int budget) return work_done; } -#endif /** * ixgb_clean_tx_irq - Reclaim resources after transmit completes @@ -1901,11 +1871,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, **/ static bool -#ifdef CONFIG_IXGB_NAPI ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) -#else -ixgb_clean_rx_irq(struct ixgb_adapter *adapter) -#endif { struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; struct net_device *netdev = adapter->netdev; @@ -1925,12 +1891,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) struct sk_buff *skb; u8 status; -#ifdef CONFIG_IXGB_NAPI if (*work_done >= work_to_do) break; (*work_done)++; -#endif status = rx_desc->status; skb = buffer_info->skb; buffer_info->skb = NULL; @@ -2005,21 +1969,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) ixgb_rx_checksum(adapter, rx_desc, skb); skb->protocol = eth_type_trans(skb, netdev); -#ifdef CONFIG_IXGB_NAPI if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { vlan_hwaccel_receive_skb(skb, adapter->vlgrp, le16_to_cpu(rx_desc->special)); } else { netif_receive_skb(skb); } -#else /* CONFIG_IXGB_NAPI */ - if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { - vlan_hwaccel_rx(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->special)); - } else { - netif_rx(skb); - } -#endif /* CONFIG_IXGB_NAPI */ netdev->last_rx = jiffies; rxdesc_done: -- cgit v1.2.1 From f731a9ef82c6728559b34743bca19d231e5e1b63 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 8 Jul 2008 15:53:09 -0700 Subject: ixgb: update copyright dates and versions Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgb/Makefile | 2 +- drivers/net/ixgb/ixgb.h | 2 +- drivers/net/ixgb/ixgb_ee.c | 2 +- drivers/net/ixgb/ixgb_ee.h | 2 +- drivers/net/ixgb/ixgb_ethtool.c | 2 +- drivers/net/ixgb/ixgb_hw.c | 2 +- drivers/net/ixgb/ixgb_hw.h | 2 +- drivers/net/ixgb/ixgb_ids.h | 2 +- drivers/net/ixgb/ixgb_main.c | 6 +++--- drivers/net/ixgb/ixgb_osdep.h | 2 +- drivers/net/ixgb/ixgb_param.c | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile index 838a5084fa00..0b20c5e62ffe 100644 --- a/drivers/net/ixgb/Makefile +++ b/drivers/net/ixgb/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel PRO/10GbE Linux driver -# Copyright(c) 1999 - 2006 Intel Corporation. +# Copyright(c) 1999 - 2008 Intel Corporation. 
# # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 5e8a60570467..804698fc6a8f 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 4a9e52f90711..89ffa7264a12 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h index 12136b633497..7ea12652f471 100644 --- a/drivers/net/ixgb/ixgb_ee.h +++ b/drivers/net/ixgb/ixgb_ee.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index e1836c16ab3d..288ee1d0f431 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 9cc75ce9dc86..11dcda0f453e 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h index 39cfa47bea69..831fe0c58b2b 100644 --- a/drivers/net/ixgb/ixgb_hw.h +++ b/drivers/net/ixgb/ixgb_hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h index 4ba4d1910eb4..2a58847f46e8 100644 --- a/drivers/net/ixgb/ixgb_ids.h +++ b/drivers/net/ixgb/ixgb_ids.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 526413482be2..e83feaf830bd 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -32,9 +32,9 @@ char ixgb_driver_name[] = "ixgb"; static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; #define DRIVERNAPI "-NAPI" -#define DRV_VERSION "1.0.126" DRIVERNAPI +#define DRV_VERSION "1.0.135-k2" DRIVERNAPI const char ixgb_driver_version[] = DRV_VERSION; -static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; +static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; #define IXGB_CB_LENGTH 256 static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH; diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h index fb74bb122e69..d92e72bd627a 100644 --- a/drivers/net/ixgb/ixgb_osdep.h +++ b/drivers/net/ixgb/ixgb_osdep.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 169636a5935d..af35e1ddadd6 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, -- cgit v1.2.1 From 661086df6cf35f62d0aec09ccb9164eb2baaaecd Mon Sep 17 00:00:00 2001 From: Peter P Waskiewicz Jr Date: Tue, 8 Jul 2008 15:06:51 -0700 Subject: igb: Introduce multiple TX queues with infrastructure This code adds multiple Tx queue infrastructure much like we previously did in ixgbe. The MSI-X vector mapping is the bulk of the change. IAM can now be safely enabled and we've verified that it does work correctly. We can also eliminate the tx ring lock. 
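As a rough, illustrative sketch (placeholder example_* names, not code from this patch) of the per-queue transmit path this infrastructure enables under CONFIG_NETDEVICES_MULTIQUEUE — the queue_mapping-based ring lookup and the subqueue stop/wake helpers are the pieces the patch actually adds to igb_xmit_frame_adv() and igb_clean_tx_irq():

static int example_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct example_adapter *adapter = netdev_priv(netdev);
	struct example_ring *tx_ring;
	int r_idx;

	/* the stack picks a queue per flow; map it onto one of our rings */
	r_idx = skb->queue_mapping & (EXAMPLE_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	if (example_desc_unused(tx_ring) < MAX_SKB_FRAGS + 4) {
		/* stop only this subqueue; the other rings keep transmitting */
		netif_stop_subqueue(netdev, tx_ring->queue_index);
		return NETDEV_TX_BUSY;
	}

	example_tx_map_and_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}

/* the per-ring cleanup path later re-opens just that queue:
 *	if (__netif_subqueue_stopped(netdev, tx_ring->queue_index))
 *		netif_wake_subqueue(netdev, tx_ring->queue_index);
 */

Flow control becomes per ring: a full ring stops only its own subqueue and the cleanup path wakes it again, so one busy queue does not stall the other transmit queues.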
Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Mitch Williams Signed-off-by: Auke Kok Signed-off-by: Jeff Garzik --- drivers/net/igb/igb.h | 7 +- drivers/net/igb/igb_main.c | 160 ++++++++++++++++++++++++++++++--------------- 2 files changed, 114 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 2c48eec17660..a1431c8797b9 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -62,6 +62,7 @@ struct igb_adapter; /* Transmit and receive queues */ #define IGB_MAX_RX_QUEUES 4 +#define IGB_MAX_TX_QUEUES 4 /* RX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of @@ -157,8 +158,6 @@ struct igb_ring { union { /* TX */ struct { - spinlock_t tx_clean_lock; - spinlock_t tx_lock; bool detect_tx_hung; }; /* RX */ @@ -277,6 +276,10 @@ struct igb_adapter { /* for ioport free */ int bars; int need_ioport; + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ }; enum e1000_state_t { diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index afd4ce3f7b53..e11a5dae668a 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -103,7 +103,7 @@ static irqreturn_t igb_msix_rx(int irq, void *); static irqreturn_t igb_msix_tx(int irq, void *); static int igb_clean_rx_ring_msix(struct napi_struct *, int); static bool igb_clean_tx_irq(struct igb_ring *); -static int igb_clean(struct napi_struct *, int); +static int igb_poll(struct napi_struct *, int); static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); @@ -224,6 +224,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter) return -ENOMEM; } + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = &(adapter->tx_ring[i]); + ring->adapter = adapter; + ring->queue_index = i; + } for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = &(adapter->rx_ring[i]); ring->adapter = adapter; @@ -231,7 +236,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter) ring->itr_register = E1000_ITR; /* set a default napi handler for each rx_ring */ - netif_napi_add(adapter->netdev, &ring->napi, igb_clean, 64); + netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); } return 0; } @@ -412,8 +417,14 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* If we can't do MSI-X, try MSI */ msi_only: adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; if (!pci_enable_msi(adapter->pdev)) adapter->msi_enabled = 1; + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + /* Notify the stack of the (possibly) reduced Tx Queue count. 
*/ + adapter->netdev->egress_subqueue_count = adapter->num_tx_queues; +#endif return; } @@ -693,6 +704,10 @@ void igb_down(struct igb_adapter *adapter) /* flush and sleep below */ netif_stop_queue(netdev); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +#endif /* disable transmits in the hardware */ tctl = rd32(E1000_TCTL); @@ -895,7 +910,11 @@ static int __devinit igb_probe(struct pci_dev *pdev, pci_save_state(pdev); err = -ENOMEM; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES); +#else netdev = alloc_etherdev(sizeof(struct igb_adapter)); +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ if (!netdev) goto err_alloc_etherdev; @@ -997,6 +1016,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + netdev->features |= NETIF_F_MULTI_QUEUE; +#endif + netdev->features |= NETIF_F_LLTX; adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); @@ -1097,6 +1120,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* tell the stack to leave us alone until igb_open() is called */ netif_carrier_off(netdev); netif_stop_queue(netdev); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +#endif strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); @@ -1223,9 +1250,15 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) /* Number of supported queues. */ /* Having more queues than CPUs doesn't make sense. */ + adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus()); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus()); +#else adapter->num_tx_queues = 1; - adapter->num_rx_queues = min(IGB_MAX_RX_QUEUES, num_online_cpus()); +#endif /* CONFIG_NET_MULTI_QUEUE_DEVICE */ + /* This call may decrease the number of queues depending on + * interrupt mode. 
*/ igb_set_interrupt_capability(adapter); if (igb_alloc_queues(adapter)) { @@ -1386,8 +1419,6 @@ int igb_setup_tx_resources(struct igb_adapter *adapter, tx_ring->adapter = adapter; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - spin_lock_init(&tx_ring->tx_clean_lock); - spin_lock_init(&tx_ring->tx_lock); return 0; err: @@ -1407,6 +1438,9 @@ err: static int igb_setup_all_tx_resources(struct igb_adapter *adapter) { int i, err = 0; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + int r_idx; +#endif for (i = 0; i < adapter->num_tx_queues; i++) { err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); @@ -1419,6 +1453,12 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) } } +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { + r_idx = i % adapter->num_tx_queues; + adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; + } +#endif return err; } @@ -2096,6 +2136,9 @@ static void igb_watchdog_task(struct work_struct *work) struct e1000_mac_info *mac = &adapter->hw.mac; u32 link; s32 ret_val; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + int i; +#endif if ((netif_carrier_ok(netdev)) && (rd32(E1000_STATUS) & E1000_STATUS_LU)) @@ -2152,6 +2195,10 @@ static void igb_watchdog_task(struct work_struct *work) netif_carrier_on(netdev); netif_wake_queue(netdev); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +#endif if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, @@ -2164,6 +2211,10 @@ static void igb_watchdog_task(struct work_struct *work) dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_stop_queue(netdev); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +#endif if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); @@ -2524,7 +2575,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); context_desc->seqnum_seed = 0; context_desc->mss_l4len_idx = - cpu_to_le32(tx_ring->eims_value >> 4); + cpu_to_le32(tx_ring->queue_index << 4); buffer_info->time_stamp = jiffies; buffer_info->dma = 0; @@ -2627,7 +2678,7 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter, if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_VLAN)) - olinfo_status |= tx_ring->eims_value >> 4; + olinfo_status |= tx_ring->queue_index << 4; olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); @@ -2663,7 +2714,12 @@ static int __igb_maybe_stop_tx(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + netif_stop_subqueue(netdev, tx_ring->queue_index); +#else netif_stop_queue(netdev); +#endif + /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ @@ -2675,7 +2731,11 @@ static int __igb_maybe_stop_tx(struct net_device *netdev, return -EBUSY; /* A reprieve! 
*/ - netif_start_queue(netdev); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + netif_wake_subqueue(netdev, tx_ring->queue_index); +#else + netif_wake_queue(netdev); +#endif ++adapter->restart_queue; return 0; } @@ -2697,7 +2757,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, struct igb_adapter *adapter = netdev_priv(netdev); unsigned int tx_flags = 0; unsigned int len; - unsigned long irq_flags; u8 hdr_len = 0; int tso = 0; @@ -2713,10 +2772,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, return NETDEV_TX_OK; } - if (!spin_trylock_irqsave(&tx_ring->tx_lock, irq_flags)) - /* Collision - tell upper layer to requeue */ - return NETDEV_TX_LOCKED; - /* need: 1 descriptor per page, * + 2 desc gap to keep tail from touching head, * + 1 desc for skb->data, @@ -2724,7 +2779,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, * otherwise try next time */ if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { /* this is a hard error */ - spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags); return NETDEV_TX_BUSY; } @@ -2733,12 +2787,14 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); } + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IGB_TX_FLAGS_IPV4; + tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; if (tso < 0) { dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags); return NETDEV_TX_OK; } @@ -2748,9 +2804,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) tx_flags |= IGB_TX_FLAGS_CSUM; - if (skb->protocol == htons(ETH_P_IP)) - tx_flags |= IGB_TX_FLAGS_IPV4; - igb_tx_queue_adv(adapter, tx_ring, tx_flags, igb_tx_map_adv(adapter, tx_ring, skb), skb->len, hdr_len); @@ -2760,14 +2813,22 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, /* Make sure there is space in the ring for the next send. */ igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4); - spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags); return NETDEV_TX_OK; } static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); - struct igb_ring *tx_ring = &adapter->tx_ring[0]; + struct igb_ring *tx_ring; + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + int r_idx = 0; + r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1); + tx_ring = adapter->multi_tx_table[r_idx]; +#else + tx_ring = &adapter->tx_ring[0]; +#endif + /* This goes back to the question of how to logically map a tx queue * to a flow. 
Right now, performance is impacted slightly negatively @@ -3035,7 +3096,7 @@ static irqreturn_t igb_msix_other(int irq, void *data) /* guard against interrupt when we're going down */ if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); - + no_link_interrupt: wr32(E1000_IMS, E1000_IMS_LSC); wr32(E1000_EIMS, adapter->eims_other); @@ -3054,12 +3115,15 @@ static irqreturn_t igb_msix_tx(int irq, void *data) tx_ring->total_bytes = 0; tx_ring->total_packets = 0; + + /* auto mask will automatically reenable the interrupt when we write + * EICS */ if (!igb_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ wr32(E1000_EICS, tx_ring->eims_value); - - if (!tx_ring->itr_val) + else wr32(E1000_EIMS, tx_ring->eims_value); + return IRQ_HANDLED; } @@ -3163,42 +3227,24 @@ static irqreturn_t igb_intr(int irq, void *data) } /** - * igb_clean - NAPI Rx polling callback - * @adapter: board private structure + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle **/ -static int igb_clean(struct napi_struct *napi, int budget) +static int igb_poll(struct napi_struct *napi, int budget) { - struct igb_adapter *adapter = container_of(napi, struct igb_adapter, - napi); + struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); + struct igb_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; - int tx_clean_complete = 1, work_done = 0; - int i; + int tx_clean_complete, work_done = 0; - /* Must NOT use netdev_priv macro here. */ - adapter = netdev->priv; - - /* Keep link state information with original netdev */ - if (!netif_carrier_ok(netdev)) - goto quit_polling; - - /* igb_clean is called per-cpu. This lock protects tx_ring[i] from - * being cleaned by multiple cpus simultaneously. A failure obtaining - * the lock means tx_ring[i] is currently being cleaned anyway. */ - for (i = 0; i < adapter->num_tx_queues; i++) { - if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) { - tx_clean_complete &= igb_clean_tx_irq(&adapter->tx_ring[i]); - spin_unlock(&adapter->tx_ring[i].tx_clean_lock); - } - } - - for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_irq_adv(&adapter->rx_ring[i], &work_done, - adapter->rx_ring[i].napi.weight); + /* this poll routine only supports one tx and one rx queue */ + tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); + igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ if ((tx_clean_complete && (work_done < budget)) || !netif_running(netdev)) { -quit_polling: if (adapter->itr_setting & 3) igb_set_itr(adapter, E1000_ITR, false); netif_rx_complete(netdev, napi); @@ -3327,11 +3373,19 @@ done_cleaning: * sees the new next_to_clean. 
*/ smp_mb(); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + ++adapter->restart_queue; + } +#else if (netif_queue_stopped(netdev) && !(test_bit(__IGB_DOWN, &adapter->state))) { netif_wake_queue(netdev); ++adapter->restart_queue; } +#endif } if (tx_ring->detect_tx_hung) { @@ -3368,7 +3422,11 @@ done_cleaning: tx_ring->buffer_info[i].time_stamp, jiffies, tx_desc->upper.fields.status); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + netif_stop_subqueue(netdev, tx_ring->queue_index); +#else netif_stop_queue(netdev); +#endif } } tx_ring->total_bytes += total_bytes; -- cgit v1.2.1 From e21ed3538f1946ea623caf28f1c44ede50224275 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:07:24 -0700 Subject: igb: update ethtool stats to support multiqueue Addesses problems seen earlier with igb driver not correctly reporting rx and tx stats. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb.h | 1 + drivers/net/igb/igb_ethtool.c | 13 +++++++++---- drivers/net/igb/igb_main.c | 2 ++ 3 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index a1431c8797b9..5915efccbcab 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -158,6 +158,7 @@ struct igb_ring { union { /* TX */ struct { + struct igb_queue_stats tx_stats; bool detect_tx_hung; }; /* RX */ diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 0447f9bcd27a..ed756c12aba6 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -96,10 +96,8 @@ static const struct igb_stats igb_gstrings_stats[] = { }; #define IGB_QUEUE_STATS_LEN \ - ((((((struct igb_adapter *)netdev->priv)->num_rx_queues > 1) ? \ - ((struct igb_adapter *)netdev->priv)->num_rx_queues : 0) + \ - (((((struct igb_adapter *)netdev->priv)->num_tx_queues > 1) ? \ - ((struct igb_adapter *)netdev->priv)->num_tx_queues : 0))) * \ + ((((struct igb_adapter *)netdev->priv)->num_rx_queues + \ + ((struct igb_adapter *)netdev->priv)->num_tx_queues) * \ (sizeof(struct igb_queue_stats) / sizeof(u64))) #define IGB_GLOBAL_STATS_LEN \ sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) @@ -1842,6 +1840,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev, data[i] = (igb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + for (j = 0; j < adapter->num_tx_queues; j++) { + int k; + queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } for (j = 0; j < adapter->num_rx_queues; j++) { int k; queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index e11a5dae668a..f975bfec2265 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -3431,6 +3431,8 @@ done_cleaning: } tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; adapter->net_stats.tx_bytes += total_bytes; adapter->net_stats.tx_packets += total_packets; return retval; -- cgit v1.2.1 From fe4506b6a2f9716ef62583020581ae2032573fed Mon Sep 17 00:00:00 2001 From: Jeb Cramer Date: Tue, 8 Jul 2008 15:07:55 -0700 Subject: igb: add DCA support Add DCA support in the similar method that it was added to the ixgbe driver recently. DCA allows the network device to put data in the CPU cache and notify the chipset of that event. This reduces cache misses during receives. Signed-off-by: Jeb Cramer Signed-off-by: Mitch Williams Signed-off-by: Auke Kok Signed-off-by: Shannon Nelson Signed-off-by: Jeff Garzik --- drivers/net/igb/e1000_82575.h | 11 +++ drivers/net/igb/e1000_regs.h | 2 + drivers/net/igb/igb.h | 4 +- drivers/net/igb/igb_main.c | 174 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 187 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index d78ad33d32bf..02e57a8447cb 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -144,9 +144,20 @@ struct e1000_adv_tx_context_desc { #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ /* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + #endif diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index ff187b73c69e..d25e914df975 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -235,6 +235,8 @@ #define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ #define E1000_SWSM 0x05B50 /* SW Semaphore */ #define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ #define E1000_HICR 0x08F00 /* Host Inteface Control */ /* RSS registers */ diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 5915efccbcab..d4a042344728 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -271,7 +271,9 @@ struct igb_adapter { /* to not mess up cache alignment, always add to the 
bottom */ unsigned long state; unsigned int msi_enabled; - +#ifdef CONFIG_DCA + unsigned int dca_enabled; +#endif u32 eeprom_wol; /* for ioport free */ diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index f975bfec2265..e8ef5410591a 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -41,7 +41,9 @@ #include #include #include - +#ifdef CONFIG_DCA +#include +#endif #include "igb.h" #define DRV_VERSION "1.0.8-k2" @@ -102,6 +104,11 @@ static irqreturn_t igb_msix_other(int irq, void *); static irqreturn_t igb_msix_rx(int irq, void *); static irqreturn_t igb_msix_tx(int irq, void *); static int igb_clean_rx_ring_msix(struct napi_struct *, int); +#ifdef CONFIG_DCA +static void igb_update_rx_dca(struct igb_ring *); +static void igb_update_tx_dca(struct igb_ring *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_DCA */ static bool igb_clean_tx_irq(struct igb_ring *); static int igb_poll(struct napi_struct *, int); static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); @@ -119,6 +126,14 @@ static int igb_suspend(struct pci_dev *, pm_message_t); static int igb_resume(struct pci_dev *); #endif static void igb_shutdown(struct pci_dev *); +#ifdef CONFIG_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif #ifdef CONFIG_NET_POLL_CONTROLLER /* for netdump / net console */ @@ -183,6 +198,9 @@ static int __init igb_init_module(void) printk(KERN_INFO "%s\n", igb_copyright); ret = pci_register_driver(&igb_driver); +#ifdef CONFIG_DCA + dca_register_notify(&dca_notifier); +#endif return ret; } @@ -196,6 +214,9 @@ module_init(igb_init_module); **/ static void __exit igb_exit_module(void) { +#ifdef CONFIG_DCA + dca_unregister_notify(&dca_notifier); +#endif pci_unregister_driver(&igb_driver); } @@ -1130,6 +1151,17 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_register; +#ifdef CONFIG_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->dca_enabled = true; + dev_info(&pdev->dev, "DCA enabled\n"); + /* Always use CB2 mode, difference is masked + * in the CB driver. */ + wr32(E1000_DCA_CTRL, 2); + igb_setup_dca(adapter); + } +#endif + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, @@ -1193,6 +1225,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; /* flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ @@ -1202,6 +1235,15 @@ static void __devexit igb_remove(struct pci_dev *pdev) flush_scheduled_work(); +#ifdef CONFIG_DCA + if (adapter->dca_enabled) { + dev_info(&pdev->dev, "DCA disabled\n"); + dca_remove_requester(&pdev->dev); + adapter->dca_enabled = false; + wr32(E1000_DCA_CTRL, 1); + } +#endif + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ igb_release_hw_control(adapter); @@ -3112,7 +3154,10 @@ static irqreturn_t igb_msix_tx(int irq, void *data) if (!tx_ring->itr_val) wr32(E1000_EIMC, tx_ring->eims_value); - +#ifdef CONFIG_DCA + if (adapter->dca_enabled) + igb_update_tx_dca(tx_ring); +#endif tx_ring->total_bytes = 0; tx_ring->total_packets = 0; @@ -3146,9 +3191,119 @@ static irqreturn_t igb_msix_rx(int irq, void *data) if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) __netif_rx_schedule(adapter->netdev, &rx_ring->napi); - return IRQ_HANDLED; +#ifdef CONFIG_DCA + if (adapter->dca_enabled) + igb_update_rx_dca(rx_ring); +#endif + return IRQ_HANDLED; +} + +#ifdef CONFIG_DCA +static void igb_update_rx_dca(struct igb_ring *rx_ring) +{ + u32 dca_rxctrl; + struct igb_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + int cpu = get_cpu(); + int q = rx_ring - adapter->rx_ring; + + if (rx_ring->cpu != cpu) { + dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; + dca_rxctrl |= dca_get_tag(cpu); + dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; + dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; + dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; + wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); + rx_ring->cpu = cpu; + } + put_cpu(); +} + +static void igb_update_tx_dca(struct igb_ring *tx_ring) +{ + u32 dca_txctrl; + struct igb_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + int cpu = get_cpu(); + int q = tx_ring - adapter->tx_ring; + + if (tx_ring->cpu != cpu) { + dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; + dca_txctrl |= dca_get_tag(cpu); + dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; + wr32(E1000_DCA_TXCTRL(q), dca_txctrl); + tx_ring->cpu = cpu; + } + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + int i; + + if (!(adapter->dca_enabled)) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) { + adapter->tx_ring[i].cpu = -1; + igb_update_tx_dca(&adapter->tx_ring[i]); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i].cpu = -1; + igb_update_rx_dca(&adapter->rx_ring[i]); + } } +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->dca_enabled) + break; + adapter->dca_enabled = true; + /* Always use CB2 mode, difference is masked + * in the CB driver. */ + wr32(E1000_DCA_CTRL, 2); + if (dca_add_requester(dev) == 0) { + dev_info(&adapter->pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->dca_enabled) { + /* without this a class_device is left + * hanging around in the sysfs model */ + dca_remove_requester(dev); + dev_info(&adapter->pdev->dev, "DCA disabled\n"); + adapter->dca_enabled = false; + wr32(E1000_DCA_CTRL, 1); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif /* CONFIG_DCA */ /** * igb_intr_msi - Interrupt Handler @@ -3239,7 +3394,16 @@ static int igb_poll(struct napi_struct *napi, int budget) int tx_clean_complete, work_done = 0; /* this poll routine only supports one tx and one rx queue */ +#ifdef CONFIG_DCA + if (adapter->dca_enabled) + igb_update_tx_dca(&adapter->tx_ring[0]); +#endif tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); + +#ifdef CONFIG_DCA + if (adapter->dca_enabled) + igb_update_rx_dca(&adapter->rx_ring[0]); +#endif igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ @@ -3268,6 +3432,10 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) if (!netif_carrier_ok(netdev)) goto quit_polling; +#ifdef CONFIG_DCA + if (adapter->dca_enabled) + igb_update_rx_dca(rx_ring); +#endif igb_clean_rx_irq_adv(rx_ring, &work_done, budget); -- cgit v1.2.1 From 87cb7e8cc59c89ccdcf243671c932179bb651a71 Mon Sep 17 00:00:00 2001 From: Auke Kok Date: Tue, 8 Jul 2008 15:08:29 -0700 Subject: igb: reenable CRC stripping in hardware We can remove a clunky workaround for not having the hardware strip the CRC. 82575 silicon as well as the older PCI Express e1000e hardware all work OK in this respect. Signed-off-by: Auke Kok Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb_main.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index e8ef5410591a..ae329c02eaef 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1664,10 +1664,12 @@ static void igb_setup_rctl(struct igb_adapter *adapter) E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); - /* disable the stripping of CRC because it breaks - * BMC firmware connected over SMBUS - rctl |= E1000_RCTL_SECRC; + /* + * enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. 
*/ + rctl |= E1000_RCTL_SECRC; rctl &= ~E1000_RCTL_SBP; @@ -3743,7 +3745,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, } } send_up: - pskb_trim(skb, skb->len - 4); i++; if (i == rx_ring->count) i = 0; -- cgit v1.2.1 From 0024fd00cd404b418b6e6a7408700814cfe7b3dd Mon Sep 17 00:00:00 2001 From: Auke Kok Date: Tue, 8 Jul 2008 15:09:37 -0700 Subject: igb: Increment driver version Signed-off-by: Auke Kok Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ae329c02eaef..14363260612c 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -46,7 +46,7 @@ #endif #include "igb.h" -#define DRV_VERSION "1.0.8-k2" +#define DRV_VERSION "1.2.45-k2" char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = -- cgit v1.2.1 From 2d064c06fecadadcb81a452acd373af00dfb1fec Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:10:12 -0700 Subject: igb: add 82576 MAC support Signed-off-by: Alexander Duyck Signed-off-by: Auke Kok Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/e1000_82575.c | 208 +++++++++++++++++++++++++++++++++++++--- drivers/net/igb/e1000_82575.h | 9 ++ drivers/net/igb/e1000_defines.h | 10 ++ drivers/net/igb/e1000_hw.h | 8 ++ drivers/net/igb/e1000_mac.c | 3 +- drivers/net/igb/e1000_mac.h | 1 + drivers/net/igb/e1000_regs.h | 7 +- drivers/net/igb/igb_ethtool.c | 146 +++++++++++++++++++++------- drivers/net/igb/igb_main.c | 170 +++++++++++++++++++++++++------- 9 files changed, 476 insertions(+), 86 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 2c8b91060d98..e098f234770f 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -31,6 +31,7 @@ #include #include +#include #include "e1000_mac.h" #include "e1000_82575.h" @@ -45,7 +46,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); -static void igb_rar_set_82575(struct e1000_hw *, u8 *, u32); static s32 igb_reset_hw_82575(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); static s32 igb_setup_copper_link_82575(struct e1000_hw *); @@ -84,6 +84,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) case E1000_DEV_ID_82575GB_QUAD_COPPER: mac->type = e1000_82575; break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + mac->type = e1000_82576; + break; default: return -E1000_ERR_MAC_INIT; break; @@ -128,6 +134,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability features are enabled. 
*/ @@ -694,13 +702,12 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) if ((hw->phy.media_type != e1000_media_type_copper) || (igb_sgmii_active_82575(hw))) ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, - &duplex); + &duplex); else ret_val = igb_check_for_copper_link(hw); return ret_val; } - /** * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure @@ -757,18 +764,129 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, } /** - * igb_rar_set_82575 - Set receive address register + * igb_init_rx_addrs_82575 - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 addr[6] = {0,0,0,0,0,0}; + /* + * This function is essentially the same as that of + * e1000_init_rx_addrs_generic. However it also takes care + * of the special case where the register offset of the + * second set of RARs begins elsewhere. This is implicitly taken care by + * function e1000_rar_set_generic. + */ + + hw_dbg("e1000_init_rx_addrs_82575"); + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, addr, i); +} + +/** + * igb_update_mc_addr_list_82575 - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +void igb_update_mc_addr_list_82575(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + u32 hash_value; + u32 i; + u8 addr[6] = {0,0,0,0,0,0}; + /* + * This function is essentially the same as that of + * igb_update_mc_addr_list_generic. However it also takes care + * of the special case where the register offset of the + * second set of RARs begins elsewhere. This is implicitly taken care by + * function e1000_rar_set_generic. + */ + + /* + * Load the first set of multicast addresses into the exact + * filters (RAR). If there are not enough to fill the RAR + * array, clear the filters. + */ + for (i = rar_used_count; i < rar_count; i++) { + if (mc_addr_count) { + igb_rar_set(hw, mc_addr_list, i); + mc_addr_count--; + mc_addr_list += ETH_ALEN; + } else { + igb_rar_set(hw, addr, i); + } + } + + /* Clear the old settings from the MTA */ + hw_dbg("Clearing MTA\n"); + for (i = 0; i < hw->mac.mta_reg_count; i++) { + array_wr32(E1000_MTA, i, 0); + wrfl(); + } + + /* Load any remaining multicast addresses into the hash table. 
*/ + for (; mc_addr_count > 0; mc_addr_count--) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + hw_dbg("Hash value = 0x%03X\n", hash_value); + hw->mac.ops.mta_set(hw, hash_value); + mc_addr_list += ETH_ALEN; + } +} + +/** + * igb_shutdown_fiber_serdes_link_82575 - Remove link during power down * @hw: pointer to the HW structure - * @addr: pointer to the receive address - * @index: receive address array register * - * Sets the receive address array register at index to the address passed - * in by addr. + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. **/ -static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index) +void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw) { - if (index < E1000_RAR_ENTRIES_82575) - igb_rar_set(hw, addr, index); + u32 reg; + + if (hw->mac.type != e1000_82576 || + (hw->phy.media_type != e1000_media_type_fiber && + hw->phy.media_type != e1000_media_type_internal_serdes)) + return; + + /* if the management interface is not enabled, then power down */ + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP7_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + msleep(1); + } return; } @@ -854,7 +972,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) igb_clear_vfta(hw); /* Setup the receive address */ - igb_init_rx_addrs(hw, rar_count); + igb_init_rx_addrs_82575(hw, rar_count); /* Zero out the Multicast HASH table */ hw_dbg("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) @@ -1113,6 +1231,70 @@ out: return ret_val; } +/** + * igb_translate_register_82576 - Translate the proper register offset + * @reg: e1000 register to be read + * + * Registers in 82576 are located in different offsets than other adapters + * even though they function in the same manner. This function takes in + * the name of the register to read and returns the correct offset for + * 82576 silicon. + **/ +u32 igb_translate_register_82576(u32 reg) +{ + /* + * Some of the Kawela registers are located at different + * offsets than they are in older adapters. + * Despite the difference in location, the registers + * function in the same manner. 
+ */ + switch (reg) { + case E1000_TDBAL(0): + reg = 0x0E000; + break; + case E1000_TDBAH(0): + reg = 0x0E004; + break; + case E1000_TDLEN(0): + reg = 0x0E008; + break; + case E1000_TDH(0): + reg = 0x0E010; + break; + case E1000_TDT(0): + reg = 0x0E018; + break; + case E1000_TXDCTL(0): + reg = 0x0E028; + break; + case E1000_RDBAL(0): + reg = 0x0C000; + break; + case E1000_RDBAH(0): + reg = 0x0C004; + break; + case E1000_RDLEN(0): + reg = 0x0C008; + break; + case E1000_RDH(0): + reg = 0x0C010; + break; + case E1000_RDT(0): + reg = 0x0C018; + break; + case E1000_RXDCTL(0): + reg = 0x0C028; + break; + case E1000_SRRCTL(0): + reg = 0x0C00C; + break; + default: + break; + } + + return reg; +} + /** * igb_reset_init_script_82575 - Inits HW defaults after reset * @hw: pointer to the HW structure @@ -1304,7 +1486,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { .reset_hw = igb_reset_hw_82575, .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, - .rar_set = igb_rar_set_82575, + .rar_set = igb_rar_set, .read_mac_addr = igb_read_mac_addr_82575, .get_speed_and_duplex = igb_get_speed_and_duplex_copper, }; diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index 02e57a8447cb..d273236c7764 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -28,9 +28,13 @@ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ +u32 igb_translate_register_82576(u32 reg); +void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); +extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); #define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 /* SRRCTL bit definitions */ #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ @@ -159,5 +163,10 @@ struct e1000_adv_tx_context_desc { #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ #endif diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index ed748dcfb7a4..afdba3c9073c 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -90,6 +90,11 @@ #define E1000_I2CCMD_ERROR 0x80000000 #define E1000_MAX_SGMII_PHY_REG_ADDR 255 #define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ @@ -213,6 +218,7 @@ /* Device Control */ #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ #define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ #define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ #define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ @@ -244,6 +250,7 @@ */ #define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_PCS_CFG_PCS_EN 8 #define E1000_PCS_LCTL_FLV_LINK_UP 1 #define E1000_PCS_LCTL_FSV_100 2 #define E1000_PCS_LCTL_FSV_1000 4 @@ -253,6 +260,7 @@ #define E1000_PCS_LCTL_AN_ENABLE 0x10000 #define E1000_PCS_LCTL_AN_RESTART 0x20000 #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 #define E1000_PCS_LSTS_LINK_OK 1 #define E1000_PCS_LSTS_SPEED_100 2 @@ -360,6 +368,7 @@ #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ #define E1000_PBA_24K 0x0018 #define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ #define IFS_MAX 80 #define IFS_MIN 40 @@ -528,6 +537,7 @@ /* PHY Control Register */ #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 746c3ea09e27..19fa4ee96f2e 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -38,6 +38,10 @@ struct e1000_hw; +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 #define E1000_DEV_ID_82575EB_COPPER 0x10A7 #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 @@ -50,6 +54,7 @@ struct e1000_hw; enum e1000_mac_type { e1000_undefined = 0, e1000_82575, + e1000_82576, e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ }; @@ -410,14 +415,17 @@ struct e1000_mac_operations { s32 (*check_for_link)(struct e1000_hw *); s32 (*reset_hw)(struct e1000_hw *); s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); s32 (*setup_physical_interface)(struct e1000_hw *); void (*rar_set)(struct e1000_hw *, u8 *, u32); s32 (*read_mac_addr)(struct e1000_hw *); s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + void (*mta_set)(struct e1000_hw *, u32); }; struct e1000_phy_operations { s32 (*acquire_phy)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); s32 (*force_speed_duplex)(struct e1000_hw *); s32 (*get_cfg_done)(struct e1000_hw *hw); s32 (*get_cable_length)(struct e1000_hw *); diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 47ad2c4277c3..20408aa1f916 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -36,7 +36,6 @@ static s32 igb_set_default_fc(struct e1000_hw *hw); static s32 igb_set_fc_watermarks(struct e1000_hw *hw); -static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); /** * igb_remove_device - Free device specific structure @@ -360,7 +359,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, * the multicast filter table array address and new table value. 
See * igb_mta_set() **/ -static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; u8 bit_shift = 0; diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index 326b6592307b..dc2f8cce15e7 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h @@ -94,5 +94,6 @@ enum e1000_mng_mode { #define E1000_HICR_C 0x02 extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); #endif diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index d25e914df975..b95093d24c09 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -56,6 +56,9 @@ #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ #define E1000_TCTL 0x00400 /* TX Control - RW */ #define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ #define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ @@ -217,6 +220,7 @@ #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ #define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ #define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ @@ -258,7 +262,8 @@ #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ -#define E1000_REGISTER(a, reg) reg +#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \ + ? reg : e1000_translate_register_82576(reg)) #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) #define rd32(reg) (readl(hw->hw_addr + reg)) diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index ed756c12aba6..e27d5a533b4f 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -827,8 +827,9 @@ err_setup: /* ethtool register test data */ struct igb_reg_test { u16 reg; - u8 array_len; - u8 test_type; + u16 reg_offset; + u16 array_len; + u16 test_type; u32 mask; u32 write; }; @@ -850,34 +851,72 @@ struct igb_reg_test { #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 -/* default register test */ +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all four RX queues before testing. 
*/ + { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. */ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ static struct igb_reg_test reg_test_82575[] = { - { E1000_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, /* Enable all four RX queues before testing. */ - { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, /* RDH is read-only for 82575, only test RDT. 
*/ - { E1000_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, - { E1000_FCRTH, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, - { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, - { E1000_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_TXCW, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, - { E1000_RA, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, - { E1000_MTA, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { 0, 0, 0, 0 } }; @@ -937,7 +976,15 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) u32 i, toggle; toggle = 0x7FFFF3FF; - test = reg_test_82575; + + switch (adapter->hw.mac.type) { + case e1000_82576: + test = reg_test_82576; + break; + default: + test = reg_test_82575; + break; + } /* Because the status register is such a special case, * we handle it separately from the rest of the register @@ -964,19 +1011,19 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) for (i = 0; i < test->array_len; i++) { switch (test->test_type) { case PATTERN_TEST: - REG_PATTERN_TEST(test->reg + (i * 0x100), + REG_PATTERN_TEST(test->reg + (i * test->reg_offset), test->mask, test->write); break; case SET_READ_TEST: - REG_SET_AND_CHECK(test->reg + (i * 0x100), + REG_SET_AND_CHECK(test->reg + (i * test->reg_offset), test->mask, test->write); break; case WRITE_NO_TEST: writel(test->write, (adapter->hw.hw_addr + test->reg) - + (i * 0x100)); + + (i * test->reg_offset)); break; case TABLE32_TEST: REG_PATTERN_TEST(test->reg + (i * 4), @@ -1392,13 +1439,39 @@ static int igb_set_phy_loopback(struct igb_adapter *adapter) static int igb_setup_loopback_test(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; + u32 reg; if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { - rctl = rd32(E1000_RCTL); - rctl |= E1000_RCTL_LBM_TCVR; - wr32(E1000_RCTL, rctl); + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, 
reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + return 0; } else if (hw->phy.media_type == e1000_media_type_copper) { return igb_set_phy_loopback(adapter); @@ -1654,10 +1727,13 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, switch (hw->device_id) { case E1000_DEV_ID_82575GB_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER: /* WoL not supported */ wol->supported = 0; break; case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: /* Wake events not supported on port B */ if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { wol->supported = 0; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 14363260612c..ba043c4e1ca2 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -51,7 +51,7 @@ char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation."; +static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { @@ -59,6 +59,10 @@ static const struct e1000_info *igb_info_tbl[] = { }; static struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, @@ -268,6 +272,10 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, { u32 msixbm = 0; struct e1000_hw *hw = &adapter->hw; + u32 ivar, index; + + switch (hw->mac.type) { + case e1000_82575: /* The 82575 assigns vectors using a bitmask, which matches the bitmask for the EICR/EIMS/EIMC registers. To assign one or more queues to a vector, we write the appropriate bits @@ -282,6 +290,47 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, E1000_EICR_TX_QUEUE0 << tx_queue; } array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + break; + case e1000_82576: + /* Kawela uses a table-based method for assigning vectors. + Each queue has a single entry in the table to which we write + a vector number along with a "valid" bit. Sadly, the layout + of the table is somewhat counterintuitive. 
*/ + if (rx_queue > IGB_N0_QUEUE) { + index = (rx_queue & 0x7); + ivar = array_rd32(E1000_IVAR0, index); + if (rx_queue < 8) { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } else { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } + adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector; + array_wr32(E1000_IVAR0, index, ivar); + } + if (tx_queue > IGB_N0_QUEUE) { + index = (tx_queue & 0x7); + ivar = array_rd32(E1000_IVAR0, index); + if (tx_queue < 8) { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } else { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } + adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector; + array_wr32(E1000_IVAR0, index, ivar); + } + break; + default: + BUG(); + break; + } } /** @@ -297,6 +346,12 @@ static void igb_configure_msix(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; adapter->eims_enable_mask = 0; + if (hw->mac.type == e1000_82576) + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *tx_ring = &adapter->tx_ring[i]; @@ -322,6 +377,8 @@ static void igb_configure_msix(struct igb_adapter *adapter) /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); @@ -337,6 +394,19 @@ static void igb_configure_msix(struct igb_adapter *adapter) adapter->eims_enable_mask |= E1000_EIMS_OTHER; adapter->eims_other = E1000_EIMS_OTHER; + break; + + case e1000_82576: + tmp = (vector++ | E1000_IVAR_VALID) << 8; + wr32(E1000_IVAR_MISC, tmp); + + adapter->eims_enable_mask = (1 << (vector)) - 1; + adapter->eims_other = 1 << (vector - 1); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ wrfl(); } @@ -474,8 +544,17 @@ static int igb_request_irq(struct igb_adapter *adapter) adapter->num_rx_queues = 1; igb_alloc_queues(adapter); } else { - wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 | - E1000_EIMS_OTHER)); + switch (hw->mac.type) { + case e1000_82575: + wr32(E1000_MSIXBM(0), + (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); + break; + case e1000_82576: + wr32(E1000_IVAR0, E1000_IVAR_VALID); + break; + default: + break; + } } if (adapter->msi_enabled) { @@ -770,16 +849,23 @@ void igb_reinit_locked(struct igb_adapter *adapter) void igb_reset(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; u32 pba = 0, tx_space, min_tx_space, min_rx_space; u16 hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. 
*/ + if (mac->type != e1000_82576) { pba = E1000_PBA_34K; + } + else { + pba = E1000_PBA_64K; + } - if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && + (mac->type < e1000_82576)) { /* adjust PBA for jumbo frames */ wr32(E1000_PBA, pba); @@ -818,8 +904,8 @@ void igb_reset(struct igb_adapter *adapter) if (pba < min_rx_space) pba = min_rx_space; } + wr32(E1000_PBA, pba); } - wr32(E1000_PBA, pba); /* flow control settings */ /* The high water mark must be low enough to fit one full frame @@ -828,10 +914,15 @@ void igb_reset(struct igb_adapter *adapter) * - 90% of the Rx FIFO size, or * - the full Rx FIFO size minus one full frame */ hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - adapter->max_frame_size)); + ((pba << 10) - 2 * adapter->max_frame_size)); - fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ - fc->low_water = fc->high_water - 8; + if (mac->type < e1000_82576) { + fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + } else { + fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + } fc->pause_time = 0xFFFF; fc->send_xon = 1; fc->type = fc->original_type; @@ -1118,9 +1209,12 @@ static int __devinit igb_probe(struct pci_dev *pdev, * lan on a particular port */ switch (pdev->device) { case E1000_DEV_ID_82575GB_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER: adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) @@ -1801,7 +1895,10 @@ static void igb_configure_rx(struct igb_adapter *adapter) get_random_bytes(&random[0], 40); - shift = 6; + if (hw->mac.type >= e1000_82576) + shift = 0; + else + shift = 6; for (j = 0; j < (32 * 4); j++) { reta.bytes[j & 3] = (j % adapter->num_rx_queues) << shift; @@ -2127,7 +2224,7 @@ static void igb_set_multi(struct net_device *netdev) if (!netdev->mc_count) { /* nothing to program, so clear mc list */ - igb_update_mc_addr_list(hw, NULL, 0, 1, + igb_update_mc_addr_list_82575(hw, NULL, 0, 1, mac->rar_entry_count); return; } @@ -2145,7 +2242,8 @@ static void igb_set_multi(struct net_device *netdev) memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); mc_ptr = mc_ptr->next; } - igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count); + igb_update_mc_addr_list_82575(hw, mta_list, i, 1, + mac->rar_entry_count); kfree(mta_list); } @@ -3211,8 +3309,14 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring) if (rx_ring->cpu != cpu) { dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; - dca_rxctrl |= dca_get_tag(cpu); + if (hw->mac.type == e1000_82576) { + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; + dca_rxctrl |= dca_get_tag(cpu) << + E1000_DCA_RXCTRL_CPUID_SHIFT; + } else { + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; + dca_rxctrl |= dca_get_tag(cpu); + } dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; @@ -3232,8 +3336,14 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring) if (tx_ring->cpu != cpu) { dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; - dca_txctrl |= dca_get_tag(cpu); + if (hw->mac.type == e1000_82576) { + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; + dca_txctrl |= 
dca_get_tag(cpu) << + E1000_DCA_TXCTRL_CPUID_SHIFT; + } else { + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; + dca_txctrl |= dca_get_tag(cpu); + } dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; wr32(E1000_DCA_TXCTRL(q), dca_txctrl); tx_ring->cpu = cpu; @@ -3572,7 +3682,7 @@ done_cleaning: /* detected Tx unit hang */ dev_err(&adapter->pdev->dev, "Detected Tx Unit Hang\n" - " Tx Queue <%lu>\n" + " Tx Queue <%d>\n" " TDH <%x>\n" " TDT <%x>\n" " next_to_use <%x>\n" @@ -3582,8 +3692,7 @@ done_cleaning: " time_stamp <%lx>\n" " jiffies <%lx>\n" " desc.status <%x>\n", - (unsigned long)((tx_ring - adapter->tx_ring) / - sizeof(struct igb_ring)), + tx_ring->queue_index, readl(adapter->hw.hw_addr + tx_ring->head), readl(adapter->hw.hw_addr + tx_ring->tail), tx_ring->next_to_use, @@ -4098,7 +4207,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; + u32 ctrl, rctl, status; u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; @@ -4141,33 +4250,24 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl); - if (adapter->hw.phy.media_type == e1000_media_type_fiber || - adapter->hw.phy.media_type == - e1000_media_type_internal_serdes) { - /* keep the laser running in D3 */ - ctrl_ext = rd32(E1000_CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; - wr32(E1000_CTRL_EXT, ctrl_ext); - } - /* Allow time for pending master requests to run */ igb_disable_pcie_master(&adapter->hw); wr32(E1000_WUC, E1000_WUC_PME_EN); wr32(E1000_WUFC, wufc); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); } else { wr32(E1000_WUC, 0); wr32(E1000_WUFC, 0); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); } - /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->en_mng_pt) { + /* make sure adapter isn't asleep if manageability/wol is enabled */ + if (wufc || adapter->en_mng_pt) { pci_enable_wake(pdev, PCI_D3hot, 1); pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + igb_shutdown_fiber_serdes_link_82575(hw); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); } /* Release control of h/w to f/w. If f/w is AMT enabled, this -- cgit v1.2.1 From 7dfc16fab1186769d7d0086830ab3fbc8fddfcba Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:10:46 -0700 Subject: igb: Add support for quad port WOL and feature flags Change igb from using a series of boolean operators to using a single flags value that contains a number of different bit flags for all the different features of the adapter. This patch also adds WOL support for quad port adapters. 
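In outline, the consolidation works by replacing the per-feature booleans with bit tests on one flags word. The fragment below is an illustrative sketch only, not part of the patch: the IGB_FLAG_* values match the defines added to igb.h in the hunks that follow, while example_configure() is a hypothetical helper used purely to show the pattern.

	/* Illustrative only: a single flags word in place of separate
	 * msi_enabled / dca_enabled booleans.  Flag values match the
	 * igb.h defines below; example_configure() is hypothetical. */
	#define IGB_FLAG_HAS_MSI	(1 << 0)
	#define IGB_FLAG_DCA_ENABLED	(1 << 3)
	#define IGB_FLAG_QUAD_PORT_A	(1 << 6)

	static void example_configure(unsigned int *flags)
	{
		*flags |= IGB_FLAG_HAS_MSI;		/* set a feature */
		*flags &= ~IGB_FLAG_DCA_ENABLED;	/* clear a feature */

		if (*flags & IGB_FLAG_QUAD_PORT_A)	/* test a feature */
			;	/* e.g. keep WoL enabled on port A only */
	}
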
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb.h | 13 ++++-- drivers/net/igb/igb_ethtool.c | 12 +++++- drivers/net/igb/igb_main.c | 92 +++++++++++++++++++++++++++++-------------- 3 files changed, 81 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index d4a042344728..ee08010d2c4f 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -270,10 +270,7 @@ struct igb_adapter { /* to not mess up cache alignment, always add to the bottom */ unsigned long state; - unsigned int msi_enabled; -#ifdef CONFIG_DCA - unsigned int dca_enabled; -#endif + unsigned int flags; u32 eeprom_wol; /* for ioport free */ @@ -285,6 +282,14 @@ struct igb_adapter { #endif /* CONFIG_NETDEVICES_MULTIQUEUE */ }; +#define IGB_FLAG_HAS_MSI (1 << 0) +#define IGB_FLAG_MSI_ENABLE (1 << 1) +#define IGB_FLAG_HAS_DCA (1 << 2) +#define IGB_FLAG_DCA_ENABLED (1 << 3) +#define IGB_FLAG_IN_NETPOLL (1 << 5) +#define IGB_FLAG_QUAD_PORT_A (1 << 6) +#define IGB_FLAG_NEED_CTX_IDX (1 << 7) + enum e1000_state_t { __IGB_TESTING, __IGB_RESETTING, diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index e27d5a533b4f..ef209b5cd390 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -1097,7 +1097,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) if (adapter->msix_entries) { /* NOTE: we don't test MSI-X interrupts here, yet */ return 0; - } else if (adapter->msi_enabled) { + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { shared_int = false; if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { *data = 1; @@ -1727,7 +1727,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, switch (hw->device_id) { case E1000_DEV_ID_82575GB_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER: /* WoL not supported */ wol->supported = 0; break; @@ -1742,6 +1741,15 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, /* return success for non excluded adapter ports */ retval = 0; break; + case E1000_DEV_ID_82576_QUAD_COPPER: + /* quad port adapters only support WoL on port A */ + if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; default: /* dual port cards only support WoL on port A from now on * unless it was enabled in the eeprom for port B diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ba043c4e1ca2..68a4fef3df9a 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -53,7 +53,6 @@ static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation."; - static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, }; @@ -170,6 +169,8 @@ static struct pci_driver igb_driver = { .err_handler = &igb_err_handler }; +static int global_quad_port_a; /* global quad port a indication */ + MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); @@ -201,6 +202,8 @@ static int __init igb_init_module(void) printk(KERN_INFO "%s\n", igb_copyright); + global_quad_port_a = 0; + ret = pci_register_driver(&igb_driver); #ifdef CONFIG_DCA dca_register_notify(&dca_notifier); @@ -471,7 +474,7 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) pci_disable_msix(adapter->pdev); 
kfree(adapter->msix_entries); adapter->msix_entries = NULL; - } else if (adapter->msi_enabled) + } else if (adapter->flags & IGB_FLAG_HAS_MSI) pci_disable_msi(adapter->pdev); return; } @@ -510,7 +513,7 @@ msi_only: adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; if (!pci_enable_msi(adapter->pdev)) - adapter->msi_enabled = 1; + adapter->flags |= IGB_FLAG_HAS_MSI; #ifdef CONFIG_NETDEVICES_MULTIQUEUE /* Notify the stack of the (possibly) reduced Tx Queue count. */ @@ -538,7 +541,7 @@ static int igb_request_irq(struct igb_adapter *adapter) /* fall back to MSI */ igb_reset_interrupt_capability(adapter); if (!pci_enable_msi(adapter->pdev)) - adapter->msi_enabled = 1; + adapter->flags |= IGB_FLAG_HAS_MSI; igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); adapter->num_rx_queues = 1; @@ -557,14 +560,14 @@ static int igb_request_irq(struct igb_adapter *adapter) } } - if (adapter->msi_enabled) { + if (adapter->flags & IGB_FLAG_HAS_MSI) { err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, netdev->name, netdev); if (!err) goto request_done; /* fall back to legacy interrupts */ igb_reset_interrupt_capability(adapter); - adapter->msi_enabled = 0; + adapter->flags &= ~IGB_FLAG_HAS_MSI; } err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, @@ -1097,6 +1100,17 @@ static int __devinit igb_probe(struct pci_dev *pdev, igb_get_bus_info_pcie(hw); + /* set flags */ + switch (hw->mac.type) { + case e1000_82576: + case e1000_82575: + adapter->flags |= IGB_FLAG_HAS_DCA; + adapter->flags |= IGB_FLAG_NEED_CTX_IDX; + break; + default: + break; + } + hw->phy.autoneg_wait_to_complete = false; hw->mac.adaptive_ifs = true; @@ -1209,7 +1223,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, * lan on a particular port */ switch (pdev->device) { case E1000_DEV_ID_82575GB_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER: adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82575EB_FIBER_SERDES: @@ -1220,6 +1233,16 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->eeprom_wol = 0; break; + case E1000_DEV_ID_82576_QUAD_COPPER: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; } /* initialize the wol settings based on the eeprom settings */ @@ -1246,8 +1269,9 @@ static int __devinit igb_probe(struct pci_dev *pdev, goto err_register; #ifdef CONFIG_DCA - if (dca_add_requester(&pdev->dev) == 0) { - adapter->dca_enabled = true; + if ((adapter->flags & IGB_FLAG_HAS_DCA) && + (dca_add_requester(&pdev->dev) == 0)) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; dev_info(&pdev->dev, "DCA enabled\n"); /* Always use CB2 mode, difference is masked * in the CB driver. */ @@ -1276,7 +1300,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", adapter->msix_entries ? "MSI-X" : - adapter->msi_enabled ? "MSI" : "legacy", + (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); return 0; @@ -1330,10 +1354,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) flush_scheduled_work(); #ifdef CONFIG_DCA - if (adapter->dca_enabled) { + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { dev_info(&pdev->dev, "DCA disabled\n"); dca_remove_requester(&pdev->dev); - adapter->dca_enabled = false; + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; wr32(E1000_DCA_CTRL, 1); } #endif @@ -2650,9 +2674,9 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); - /* Context index must be unique per ring. Luckily, so is the interrupt - * mask value. */ - mss_l4len_idx |= tx_ring->eims_value >> 4; + /* Context index must be unique per ring. */ + if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) + mss_l4len_idx |= tx_ring->queue_index << 4; context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); context_desc->seqnum_seed = 0; @@ -2716,8 +2740,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); context_desc->seqnum_seed = 0; - context_desc->mss_l4len_idx = - cpu_to_le32(tx_ring->queue_index << 4); + if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) + context_desc->mss_l4len_idx = + cpu_to_le32(tx_ring->queue_index << 4); buffer_info->time_stamp = jiffies; buffer_info->dma = 0; @@ -2818,8 +2843,9 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter, olinfo_status |= E1000_TXD_POPTS_TXSM << 8; } - if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | - IGB_TX_FLAGS_VLAN)) + if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && + (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_VLAN))) olinfo_status |= tx_ring->queue_index << 4; olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); @@ -3255,7 +3281,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data) if (!tx_ring->itr_val) wr32(E1000_EIMC, tx_ring->eims_value); #ifdef CONFIG_DCA - if (adapter->dca_enabled) + if (adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_tx_dca(tx_ring); #endif tx_ring->total_bytes = 0; @@ -3292,7 +3318,7 @@ static irqreturn_t igb_msix_rx(int irq, void *data) __netif_rx_schedule(adapter->netdev, &rx_ring->napi); #ifdef CONFIG_DCA - if (adapter->dca_enabled) + if (adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_rx_dca(rx_ring); #endif return IRQ_HANDLED; @@ -3355,7 +3381,7 @@ static void igb_setup_dca(struct igb_adapter *adapter) { int i; - if (!(adapter->dca_enabled)) + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) return; for (i = 0; i < adapter->num_tx_queues; i++) { @@ -3375,12 +3401,15 @@ static int __igb_notify_dca(struct device *dev, void *data) struct e1000_hw *hw = &adapter->hw; unsigned long event = *(unsigned long *)data; + if (!(adapter->flags & IGB_FLAG_HAS_DCA)) + goto out; + switch (event) { case DCA_PROVIDER_ADD: /* if already enabled, don't do it again */ - if (adapter->dca_enabled) + if (adapter->flags & IGB_FLAG_DCA_ENABLED) break; - adapter->dca_enabled = true; + adapter->flags |= IGB_FLAG_DCA_ENABLED; /* Always use CB2 mode, difference is masked * in the CB driver. */ wr32(E1000_DCA_CTRL, 2); @@ -3391,17 +3420,17 @@ static int __igb_notify_dca(struct device *dev, void *data) } /* Fall Through since DCA is disabled. 
*/ case DCA_PROVIDER_REMOVE: - if (adapter->dca_enabled) { + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { /* without this a class_device is left * hanging around in the sysfs model */ dca_remove_requester(dev); dev_info(&adapter->pdev->dev, "DCA disabled\n"); - adapter->dca_enabled = false; + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; wr32(E1000_DCA_CTRL, 1); } break; } - +out: return 0; } @@ -3507,13 +3536,13 @@ static int igb_poll(struct napi_struct *napi, int budget) /* this poll routine only supports one tx and one rx queue */ #ifdef CONFIG_DCA - if (adapter->dca_enabled) + if (adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_tx_dca(&adapter->tx_ring[0]); #endif tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); #ifdef CONFIG_DCA - if (adapter->dca_enabled) + if (adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_rx_dca(&adapter->rx_ring[0]); #endif igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget); @@ -3545,7 +3574,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) goto quit_polling; #ifdef CONFIG_DCA - if (adapter->dca_enabled) + if (adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_rx_dca(rx_ring); #endif igb_clean_rx_irq_adv(rx_ring, &work_done, budget); @@ -4350,6 +4379,8 @@ static void igb_netpoll(struct net_device *netdev) int work_done = 0; igb_irq_disable(adapter); + adapter->flags |= IGB_FLAG_IN_NETPOLL; + for (i = 0; i < adapter->num_tx_queues; i++) igb_clean_tx_irq(&adapter->tx_ring[i]); @@ -4358,6 +4389,7 @@ static void igb_netpoll(struct net_device *netdev) &work_done, adapter->rx_ring[i].napi.weight); + adapter->flags &= ~IGB_FLAG_IN_NETPOLL; igb_irq_enable(adapter); } #endif /* CONFIG_NET_POLL_CONTROLLER */ -- cgit v1.2.1 From bf36c1a0040cc6ccd63cdd1cec25d2085f2df964 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:11:40 -0700 Subject: igb: add page recycling support This patch adds support for page recycling by splitting the page into two usable portions and tracking the reference count. 
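In outline, each receive buffer maps one half of a page; when that half is handed to the stack, the driver flips to the other half and takes an extra page reference so the page can be reused once the stack drops its half. The sketch below is a simplified illustration of that idea, not the literal driver code: DMA mapping, header-split handling, length accounting and error paths are omitted, and struct igb_buffer is the structure extended with page/page_offset in the igb.h hunk that follows.

	/* Simplified sketch of the half-page recycling scheme (not the
	 * actual patch code).  refill_rx_page() alternates between the two
	 * halves of a page; attach_rx_page() decides whether the page can
	 * be recycled based on its reference count. */
	static void refill_rx_page(struct igb_buffer *bi)
	{
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);	/* allocation failure not handled here */
			bi->page_offset = 0;			/* start with the first half */
		} else {
			bi->page_offset ^= PAGE_SIZE / 2;	/* flip to the other half */
		}
		/* pci_map_page(pdev, bi->page, bi->page_offset,
		 *              PAGE_SIZE / 2, PCI_DMA_FROMDEVICE) would follow */
	}

	static void attach_rx_page(struct igb_buffer *bi, struct sk_buff *skb,
				   unsigned int len)
	{
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
				   bi->page, bi->page_offset, len);

		if (page_count(bi->page) != 1)
			bi->page = NULL;	/* other half still in flight: use a fresh page next time */
		else
			get_page(bi->page);	/* sole owner: keep the page and recycle the other half */
	}
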
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb.h | 4 +- drivers/net/igb/igb_main.c | 138 ++++++++++++++++++++------------------------- 2 files changed, 63 insertions(+), 79 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index ee08010d2c4f..f41b9996d2ed 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -125,6 +125,7 @@ struct igb_buffer { struct { struct page *page; u64 page_dma; + unsigned int page_offset; }; }; }; @@ -163,9 +164,6 @@ struct igb_ring { }; /* RX */ struct { - /* arrays of page information for packet split */ - struct sk_buff *pending_skb; - int pending_skb_page; int no_itr_adjust; struct igb_queue_stats rx_stats; struct napi_struct napi; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 68a4fef3df9a..660a78653287 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1725,7 +1725,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->pending_skb = NULL; rx_ring->adapter = adapter; @@ -1817,15 +1816,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter) rctl |= E1000_RCTL_SZ_2048; rctl &= ~E1000_RCTL_BSEX; break; - case IGB_RXBUFFER_4096: - rctl |= E1000_RCTL_SZ_4096; - break; - case IGB_RXBUFFER_8192: - rctl |= E1000_RCTL_SZ_8192; - break; - case IGB_RXBUFFER_16384: - rctl |= E1000_RCTL_SZ_16384; - break; } } else { rctl &= ~E1000_RCTL_BSEX; @@ -1843,10 +1833,8 @@ static void igb_setup_rctl(struct igb_adapter *adapter) * so only enable packet split for jumbo frames */ if (rctl & E1000_RCTL_LPE) { adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; - srrctl = adapter->rx_ps_hdr_size << + srrctl |= adapter->rx_ps_hdr_size << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - /* buffer size is ALWAYS one page */ - srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } else { adapter->rx_ps_hdr_size = 0; @@ -2151,20 +2139,17 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) buffer_info->skb = NULL; } if (buffer_info->page) { - pci_unmap_page(pdev, buffer_info->page_dma, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + if (buffer_info->page_dma) + pci_unmap_page(pdev, buffer_info->page_dma, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); put_page(buffer_info->page); buffer_info->page = NULL; buffer_info->page_dma = 0; + buffer_info->page_offset = 0; } } - /* there also may be some cached data from a chained receive */ - if (rx_ring->pending_skb) { - dev_kfree_skb(rx_ring->pending_skb); - rx_ring->pending_skb = NULL; - } - size = sizeof(struct igb_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); @@ -3091,7 +3076,11 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) else if (max_frame <= IGB_RXBUFFER_2048) adapter->rx_buffer_len = IGB_RXBUFFER_2048; else - adapter->rx_buffer_len = IGB_RXBUFFER_4096; +#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 + adapter->rx_buffer_len = IGB_RXBUFFER_16384; +#else + adapter->rx_buffer_len = PAGE_SIZE / 2; +#endif /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)) @@ -3796,7 +3785,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc , *next_rxd; struct igb_buffer *buffer_info , *next_buffer; struct sk_buff *skb; - unsigned int i, j; + unsigned int i; u32 length, hlen, staterr; bool cleaned = false; int 
cleaned_count = 0; @@ -3826,61 +3815,46 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, cleaned = true; cleaned_count++; - if (rx_ring->pending_skb != NULL) { - skb = rx_ring->pending_skb; - rx_ring->pending_skb = NULL; - j = rx_ring->pending_skb_page; - } else { - skb = buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - buffer_info->skb = NULL; - if (hlen) { - pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_ps_hdr_size + - NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); - skb_put(skb, hlen); - } else { - pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_buffer_len + - NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); - skb_put(skb, length); - goto send_up; - } - j = 0; + skb = buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + buffer_info->skb = NULL; + if (!adapter->rx_ps_hdr_size) { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len + + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + skb_put(skb, length); + goto send_up; + } + + if (!skb_shinfo(skb)->nr_frags) { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_hdr_size + + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + skb_put(skb, hlen); } - while (length) { + if (length) { pci_unmap_page(pdev, buffer_info->page_dma, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); buffer_info->page_dma = 0; - skb_fill_page_desc(skb, j, buffer_info->page, - 0, length); - buffer_info->page = NULL; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + buffer_info->page, + buffer_info->page_offset, + length); + + if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || + (page_count(buffer_info->page) != 1)) + buffer_info->page = NULL; + else + get_page(buffer_info->page); skb->len += length; skb->data_len += length; - skb->truesize += length; - rx_desc->wb.upper.status_error = 0; - if (staterr & E1000_RXD_STAT_EOP) - break; - - j++; - cleaned_count++; - i++; - if (i == rx_ring->count) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - length = le16_to_cpu(rx_desc->wb.upper.length); - if (!(staterr & E1000_RXD_STAT_DD)) { - rx_ring->pending_skb = skb; - rx_ring->pending_skb_page = j; - goto out; - } + skb->truesize += length; } send_up: i++; @@ -3890,6 +3864,12 @@ send_up: prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; + if (!(staterr & E1000_RXD_STAT_EOP)) { + buffer_info->skb = xchg(&next_buffer->skb, skb); + buffer_info->dma = xchg(&next_buffer->dma, 0); + goto next_desc; + } + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { dev_kfree_skb_irq(skb); goto next_desc; @@ -3922,7 +3902,7 @@ next_desc: staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } -out: + rx_ring->next_to_clean = i; cleaned_count = IGB_DESC_UNUSED(rx_ring); @@ -3960,16 +3940,22 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, while (cleaned_count--) { rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - if (adapter->rx_ps_hdr_size && !buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); + if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { if (!buffer_info->page) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + buffer_info->page_offset = 0; + } else { + buffer_info->page_offset ^= PAGE_SIZE / 2; } buffer_info->page_dma = pci_map_page(pdev, buffer_info->page, - 0, PAGE_SIZE, + buffer_info->page_offset, + PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); } -- cgit 
v1.2.1 From d3352520273426e4c16e91d189aa8aa7ee5e96c5 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:12:13 -0700 Subject: igb: add support for in kernel LRO This patch adds support for the use of the inet_lro module to provide software LRO support. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 9 ++++ drivers/net/igb/e1000_82575.h | 2 + drivers/net/igb/igb.h | 16 ++++++ drivers/net/igb/igb_ethtool.c | 17 +++++++ drivers/net/igb/igb_main.c | 112 ++++++++++++++++++++++++++++++++++++++---- 5 files changed, 147 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 21414177ee1e..44d1c835128a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2018,6 +2018,15 @@ config IGB To compile this driver as a module, choose M here. The module will be called igb. +config IGB_LRO + bool "Use software LRO" + depends on IGB && INET + select INET_LRO + ---help--- + Say Y here if you want to use large receive offload. + + If in doubt, say N. + source "drivers/net/ixp2000/Kconfig" config MYRI_SBUS diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index d273236c7764..2f848e578a24 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -99,6 +99,8 @@ union e1000_adv_rx_desc { /* RSS Hash results */ /* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index f41b9996d2ed..c25ca17d3228 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -36,6 +36,12 @@ struct igb_adapter; +#ifdef CONFIG_IGB_LRO +#include +#define MAX_LRO_AGGR 32 +#define MAX_LRO_DESCRIPTORS 8 +#endif + /* Interrupt defines */ #define IGB_MAX_TX_CLEAN 72 @@ -167,6 +173,10 @@ struct igb_ring { int no_itr_adjust; struct igb_queue_stats rx_stats; struct napi_struct napi; +#ifdef CONFIG_IGB_LRO + struct net_lro_mgr lro_mgr; + bool lro_used; +#endif }; }; @@ -278,6 +288,12 @@ struct igb_adapter { #ifdef CONFIG_NETDEVICES_MULTIQUEUE struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; #endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#ifdef CONFIG_IGB_LRO + unsigned int lro_max_aggr; + unsigned int lro_aggregated; + unsigned int lro_flushed; + unsigned int lro_no_desc; +#endif }; #define IGB_FLAG_HAS_MSI (1 << 0) diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index ef209b5cd390..7db183093768 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -93,6 +93,11 @@ static const struct igb_stats igb_gstrings_stats[] = { { "tx_smbus", IGB_STAT(stats.mgptc) }, { "rx_smbus", IGB_STAT(stats.mgprc) }, { "dropped_smbus", IGB_STAT(stats.mgpdc) }, +#ifdef CONFIG_IGB_LRO + { "lro_aggregated", IGB_STAT(lro_aggregated) }, + { "lro_flushed", IGB_STAT(lro_flushed) }, + { "lro_no_desc", IGB_STAT(lro_no_desc) }, +#endif }; #define IGB_QUEUE_STATS_LEN \ @@ -1917,6 +1922,18 @@ static void igb_get_ethtool_stats(struct net_device *netdev, int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); int j; int i; +#ifdef CONFIG_IGB_LRO + int aggregated = 0, flushed = 0, no_desc = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; + flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; + 
no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; + } + adapter->lro_aggregated = aggregated; + adapter->lro_flushed = flushed; + adapter->lro_no_desc = no_desc; +#endif igb_update_stats(adapter); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 660a78653287..89416ebda9ef 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -116,6 +116,9 @@ static bool igb_clean_tx_irq(struct igb_ring *); static int igb_poll(struct napi_struct *, int); static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); +#ifdef CONFIG_IGB_LRO +static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *); +#endif static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -1134,6 +1137,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; +#ifdef CONFIG_IGB_LRO + netdev->features |= NETIF_F_LRO; +#endif + netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_HW_CSUM; @@ -1705,6 +1712,14 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size, desc_len; +#ifdef CONFIG_IGB_LRO + size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS; + rx_ring->lro_mgr.lro_arr = vmalloc(size); + if (!rx_ring->lro_mgr.lro_arr) + goto err; + memset(rx_ring->lro_mgr.lro_arr, 0, size); +#endif + size = sizeof(struct igb_buffer) * rx_ring->count; rx_ring->buffer_info = vmalloc(size); if (!rx_ring->buffer_info) @@ -1731,6 +1746,10 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, return 0; err: +#ifdef CONFIG_IGB_LRO + vfree(rx_ring->lro_mgr.lro_arr); + rx_ring->lro_mgr.lro_arr = NULL; +#endif vfree(rx_ring->buffer_info); dev_err(&adapter->pdev->dev, "Unable to allocate memory for " "the receive descriptor ring\n"); @@ -1894,6 +1913,16 @@ static void igb_configure_rx(struct igb_adapter *adapter) rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; wr32(E1000_RXDCTL(i), rxdctl); +#ifdef CONFIG_IGB_LRO + /* Intitial LRO Settings */ + ring->lro_mgr.max_aggr = MAX_LRO_AGGR; + ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; + ring->lro_mgr.get_skb_header = igb_get_skb_hdr; + ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; + ring->lro_mgr.dev = adapter->netdev; + ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; + ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; +#endif } if (adapter->num_rx_queues > 1) { @@ -2085,6 +2114,11 @@ static void igb_free_rx_resources(struct igb_ring *rx_ring) vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; +#ifdef CONFIG_IGB_LRO + vfree(rx_ring->lro_mgr.lro_arr); + rx_ring->lro_mgr.lro_arr = NULL; +#endif + pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; @@ -3735,22 +3769,75 @@ done_cleaning: return retval; } +#ifdef CONFIG_IGB_LRO + /** + * igb_get_skb_hdr - helper function for LRO header processing + * @skb: pointer to sk_buff to be added to LRO packet + * @iphdr: pointer to ip header structure + * @tcph: pointer to tcp header structure + * @hdr_flags: pointer to header flags + * @priv: pointer to the receive descriptor for the current sk_buff + **/ +static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, + u64 *hdr_flags, void *priv) +{ + union 
e1000_adv_rx_desc *rx_desc = priv; + u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info & + (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP); + + /* Verify that this is a valid IPv4 TCP packet */ + if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 | + E1000_RXDADV_PKTTYPE_TCP)) + return -1; + + /* Set network headers */ + skb_reset_network_header(skb); + skb_set_transport_header(skb, ip_hdrlen(skb)); + *iphdr = ip_hdr(skb); + *tcph = tcp_hdr(skb); + *hdr_flags = LRO_IPV4 | LRO_TCP; + + return 0; + +} +#endif /* CONFIG_IGB_LRO */ /** * igb_receive_skb - helper function to handle rx indications - * @adapter: board private structure + * @ring: pointer to receive ring receving this packet * @status: descriptor status field as written by hardware * @vlan: descriptor vlan field as written by hardware (no le/be conversion) * @skb: pointer to sk_buff to be indicated to stack **/ -static void igb_receive_skb(struct igb_adapter *adapter, u8 status, __le16 vlan, - struct sk_buff *skb) +static void igb_receive_skb(struct igb_ring *ring, u8 status, + union e1000_adv_rx_desc * rx_desc, + struct sk_buff *skb) { - if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) - vlan_hwaccel_receive_skb(skb, adapter->vlgrp, - le16_to_cpu(vlan)); - else - netif_receive_skb(skb); + struct igb_adapter * adapter = ring->adapter; + bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); + +#ifdef CONFIG_IGB_LRO + if (adapter->netdev->features & NETIF_F_LRO && + skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (vlan_extracted) + lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, + adapter->vlgrp, + le16_to_cpu(rx_desc->wb.upper.vlan), + rx_desc); + else + lro_receive_skb(&ring->lro_mgr,skb, rx_desc); + ring->lro_used = 1; + } else { +#endif + if (vlan_extracted) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(rx_desc->wb.upper.vlan)); + else + + netif_receive_skb(skb); +#ifdef CONFIG_IGB_LRO + } +#endif } @@ -3883,7 +3970,7 @@ send_up: skb->protocol = eth_type_trans(skb, netdev); - igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb); + igb_receive_skb(rx_ring, staterr, rx_desc, skb); netdev->last_rx = jiffies; @@ -3906,6 +3993,13 @@ next_desc: rx_ring->next_to_clean = i; cleaned_count = IGB_DESC_UNUSED(rx_ring); +#ifdef CONFIG_IGB_LRO + if (rx_ring->lro_used) { + lro_flush_all(&rx_ring->lro_mgr); + rx_ring->lro_used = 0; + } +#endif + if (cleaned_count) igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); -- cgit v1.2.1 From a88f10ec7a5b3d87cb9372481055340018652389 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:13:38 -0700 Subject: igb: update suspend resume Updates the suspend and resume to better handle the possibility of MSIX vector changes. 
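The idea, visible in the hunks below, is to tear the queue structures down completely on suspend and rebuild them on resume, so that a changed MSI-X vector count after resume is never paired with rings sized for the old layout. A standalone sketch of that ordering follows; every helper here is a stub standing in for the real igb_close / igb_reset_interrupt_capability / igb_free_queues / igb_set_interrupt_capability / igb_alloc_queues / igb_open calls shown in the patch.

    #include <stdio.h>

    static int running = 1;

    static void dev_close(void)      { puts("close netdev (frees IRQs, stops rings)"); }
    static void reset_intr_cap(void) { puts("release MSI-X/MSI vectors"); }
    static void free_queues(void)    { puts("free tx/rx ring structures"); }
    static void set_intr_cap(void)   { puts("re-negotiate MSI-X/MSI vectors"); }
    static int  alloc_queues(void)   { puts("allocate rings for the new vector count"); return 0; }
    static int  dev_open(void)       { puts("open netdev (requests IRQs, sets up rings)"); return 0; }

    static int suspend(void)
    {
        if (running)
            dev_close();
        /* nothing interrupt-related survives across suspend */
        reset_intr_cap();
        free_queues();
        return 0;
    }

    static int resume(void)
    {
        /* rebuild from scratch against whatever vectors are available now */
        set_intr_cap();
        if (alloc_queues())
            return -1;
        if (running && dev_open())
            return -1;
        return 0;
    }

    int main(void)
    {
        suspend();
        resume();
        return 0;
    }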
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb_main.c | 46 +++++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 89416ebda9ef..00e2e8d30ba2 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -272,6 +272,17 @@ static int igb_alloc_queues(struct igb_adapter *adapter) return 0; } +static void igb_free_queues(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + netif_napi_del(&adapter->rx_ring[i].napi); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +} + #define IGB_N0_QUEUE -1 static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, int tx_queue, int msix_vector) @@ -1322,8 +1333,7 @@ err_eeprom: iounmap(hw->flash_address); igb_remove_device(hw); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); + igb_free_queues(adapter); err_sw_init: err_hw_init: iounmap(hw->hw_addr); @@ -1381,8 +1391,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) igb_remove_device(&adapter->hw); igb_reset_interrupt_capability(adapter); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); + igb_free_queues(adapter); iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) @@ -4324,11 +4333,12 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(netdev); - if (netif_running(netdev)) { - WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); - igb_down(adapter); - igb_free_irq(adapter); - } + if (netif_running(netdev)) + igb_close(netdev); + + igb_reset_interrupt_capability(adapter); + + igb_free_queues(adapter); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -4415,10 +4425,11 @@ static int igb_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - if (netif_running(netdev)) { - err = igb_request_irq(adapter); - if (err) - return err; + igb_set_interrupt_capability(adapter); + + if (igb_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; } /* e1000_power_up_phy(adapter); */ @@ -4426,10 +4437,11 @@ static int igb_resume(struct pci_dev *pdev) igb_reset(adapter); wr32(E1000_WUS, ~0); - igb_init_manageability(adapter); - - if (netif_running(netdev)) - igb_up(adapter); + if (netif_running(netdev)) { + err = igb_open(netdev); + if (err) + return err; + } netif_device_attach(netdev); -- cgit v1.2.1 From 9280fa5201d7f69b20af4b7efadb5fe8f2f67277 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:14:04 -0700 Subject: igb: unused variable warning in igb remove Wrap hw variable declaration in DCA flags to prevent unused variable warning during compilation. 
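The pattern in isolation: when a local is only consumed inside a conditionally compiled region, its declaration has to live under the same guard or builds without that option trip the unused-variable warning. A minimal sketch, with FEATURE_X standing in for CONFIG_DCA:

    #include <stdio.h>

    struct ctx { int hw_id; };

    static void teardown(struct ctx *c)
    {
    #ifdef FEATURE_X
        int id = c->hw_id;  /* only consumed when FEATURE_X is built in */
    #else
        (void)c;            /* silence unused-parameter instead          */
    #endif

        /* ... common teardown work ... */
    #ifdef FEATURE_X
        printf("telling feature X to forget hw %d\n", id);
    #endif
        puts("teardown done");
    }

    int main(void)
    {
        struct ctx c = { .hw_id = 3 };
        teardown(&c);
        return 0;
    }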
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 00e2e8d30ba2..2a5303c311bc 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1360,7 +1360,9 @@ static void __devexit igb_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_DCA struct e1000_hw *hw = &adapter->hw; +#endif /* flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ -- cgit v1.2.1 From 6eb5a7f1dbd56883680290f6a0bd2d8d15f8ff58 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 8 Jul 2008 15:14:44 -0700 Subject: igb: Improve multiqueue AIM support Improve multiqueue performance Change itr_val to reflect ITR timer value instead of ints/sec Cleaned up AIM algorithms in general Based on work by Mitch Williams Signed-off-by: Alexander Duyck Acked-by: Mitch Williams Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/igb/igb.h | 8 +- drivers/net/igb/igb_ethtool.c | 17 +++-- drivers/net/igb/igb_main.c | 169 ++++++++++++++++++++++++------------------ 3 files changed, 113 insertions(+), 81 deletions(-) (limited to 'drivers') diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index c25ca17d3228..56de7ec15b46 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -47,7 +47,9 @@ struct igb_adapter; #define IGB_MIN_DYN_ITR 3000 #define IGB_MAX_DYN_ITR 96000 -#define IGB_START_ITR 6000 + +/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */ +#define IGB_START_ITR 648 #define IGB_DYN_ITR_PACKET_THRESHOLD 2 #define IGB_DYN_ITR_LENGTH_LOW 200 @@ -170,9 +172,10 @@ struct igb_ring { }; /* RX */ struct { - int no_itr_adjust; struct igb_queue_stats rx_stats; struct napi_struct napi; + int set_itr; + struct igb_ring *buddy; #ifdef CONFIG_IGB_LRO struct net_lro_mgr lro_mgr; bool lro_used; @@ -219,7 +222,6 @@ struct igb_adapter { u32 itr_setting; u16 tx_itr; u16 rx_itr; - int set_itr; struct work_struct reset_task; struct work_struct watchdog_task; diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 7db183093768..11aee1309951 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -1861,6 +1861,8 @@ static int igb_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 3) && @@ -1869,13 +1871,16 @@ static int igb_set_coalesce(struct net_device *netdev, return -EINVAL; /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs <= 3) + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { adapter->itr_setting = ec->rx_coalesce_usecs; - else - adapter->itr_setting = (1000000 / ec->rx_coalesce_usecs); + adapter->itr = IGB_START_ITR; + } else { + adapter->itr_setting = ec->rx_coalesce_usecs << 2; + adapter->itr = adapter->itr_setting; + } - if (netif_running(netdev)) - igb_reinit_locked(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) + wr32(adapter->rx_ring[i].itr_register, adapter->itr); return 0; } @@ -1888,7 +1893,7 @@ static int igb_get_coalesce(struct net_device *netdev, if (adapter->itr_setting <= 3) ec->rx_coalesce_usecs = adapter->itr_setting; else - 
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + ec->rx_coalesce_usecs = adapter->itr_setting >> 2; return 0; } diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 2a5303c311bc..aaed129f4ca0 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -255,6 +255,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter) return -ENOMEM; } + adapter->rx_ring->buddy = adapter->tx_ring; + for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = &(adapter->tx_ring[i]); ring->adapter = adapter; @@ -375,7 +377,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++); adapter->eims_enable_mask |= tx_ring->eims_value; if (tx_ring->itr_val) - writel(1000000000 / (tx_ring->itr_val * 256), + writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); else writel(1, hw->hw_addr + tx_ring->itr_register); @@ -383,10 +385,11 @@ static void igb_configure_msix(struct igb_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *rx_ring = &adapter->rx_ring[i]; + rx_ring->buddy = 0; igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++); adapter->eims_enable_mask |= rx_ring->eims_value; if (rx_ring->itr_val) - writel(1000000000 / (rx_ring->itr_val * 256), + writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); else writel(1, hw->hw_addr + rx_ring->itr_register); @@ -449,7 +452,7 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto out; ring->itr_register = E1000_EITR(0) + (vector << 2); - ring->itr_val = adapter->itr; + ring->itr_val = 976; /* ~4000 ints/sec */ vector++; } for (i = 0; i < adapter->num_rx_queues; i++) { @@ -1898,8 +1901,7 @@ static void igb_configure_rx(struct igb_adapter *adapter) mdelay(10); if (adapter->itr_setting > 3) - wr32(E1000_ITR, - 1000000000 / (adapter->itr * 256)); + wr32(E1000_ITR, adapter->itr); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -2463,38 +2465,60 @@ enum latency_range { }; -static void igb_lower_rx_eitr(struct igb_adapter *adapter, - struct igb_ring *rx_ring) +/** + * igb_update_ring_itr - update the dynamic ITR value based on packet size + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igb_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this fuction + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * This functionality is controlled by the InterruptThrottleRate module + * parameter (see igb_param.c) + * NOTE: This function is called only when operating in a multiqueue + * receive environment. 
+ * @rx_ring: pointer to ring + **/ +static void igb_update_ring_itr(struct igb_ring *rx_ring) { - struct e1000_hw *hw = &adapter->hw; - int new_val; + int new_val = rx_ring->itr_val; + int avg_wire_size = 0; + struct igb_adapter *adapter = rx_ring->adapter; - new_val = rx_ring->itr_val / 2; - if (new_val < IGB_MIN_DYN_ITR) - new_val = IGB_MIN_DYN_ITR; + if (!rx_ring->total_packets) + goto clear_counts; /* no packets, so don't do anything */ - if (new_val != rx_ring->itr_val) { - rx_ring->itr_val = new_val; - wr32(rx_ring->itr_register, - 1000000000 / (new_val * 256)); + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. + */ + if (adapter->link_speed != SPEED_1000) { + new_val = 120; + goto set_itr_val; } -} + avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; -static void igb_raise_rx_eitr(struct igb_adapter *adapter, - struct igb_ring *rx_ring) -{ - struct e1000_hw *hw = &adapter->hw; - int new_val; + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); - new_val = rx_ring->itr_val * 2; - if (new_val > IGB_MAX_DYN_ITR) - new_val = IGB_MAX_DYN_ITR; + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; +set_itr_val: if (new_val != rx_ring->itr_val) { rx_ring->itr_val = new_val; - wr32(rx_ring->itr_register, - 1000000000 / (new_val * 256)); + rx_ring->set_itr = 1; } +clear_counts: + rx_ring->total_bytes = 0; + rx_ring->total_packets = 0; } /** @@ -2561,8 +2585,7 @@ update_itr_done: return retval; } -static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register, - int rx_only) +static void igb_set_itr(struct igb_adapter *adapter) { u16 current_itr; u32 new_itr = adapter->itr; @@ -2578,26 +2601,23 @@ static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register, adapter->rx_itr, adapter->rx_ring->total_packets, adapter->rx_ring->total_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) - adapter->rx_itr = low_latency; - if (!rx_only) { + if (adapter->rx_ring->buddy) { adapter->tx_itr = igb_update_itr(adapter, adapter->tx_itr, adapter->tx_ring->total_packets, adapter->tx_ring->total_bytes); - /* conservative mode (itr 3) eliminates the - * lowest_latency setting */ - if (adapter->itr_setting == 3 && - adapter->tx_itr == lowest_latency) - adapter->tx_itr = low_latency; current_itr = max(adapter->rx_itr, adapter->tx_itr); } else { current_itr = adapter->rx_itr; } + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && + current_itr == lowest_latency) + current_itr = low_latency; + switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ case lowest_latency: @@ -2614,6 +2634,13 @@ static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register, } set_itr_now: + adapter->rx_ring->total_bytes = 0; + adapter->rx_ring->total_packets = 0; + if (adapter->rx_ring->buddy) { + adapter->rx_ring->buddy->total_bytes = 0; + adapter->rx_ring->buddy->total_packets = 0; + } + if (new_itr != adapter->itr) { /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is @@ -2628,7 +2655,8 @@ set_itr_now: * ends up being correct. 
*/ adapter->itr = new_itr; - adapter->set_itr = 1; + adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256); + adapter->rx_ring->set_itr = 1; } return; @@ -2979,6 +3007,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb, /* this is a hard error */ return NETDEV_TX_BUSY; } + skb_orphan(skb); if (adapter->vlgrp && vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; @@ -3312,8 +3341,6 @@ static irqreturn_t igb_msix_tx(int irq, void *data) struct igb_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - if (!tx_ring->itr_val) - wr32(E1000_EIMC, tx_ring->eims_value); #ifdef CONFIG_DCA if (adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_tx_dca(tx_ring); @@ -3332,21 +3359,36 @@ static irqreturn_t igb_msix_tx(int irq, void *data) return IRQ_HANDLED; } +static void igb_write_itr(struct igb_ring *ring) +{ + struct e1000_hw *hw = &ring->adapter->hw; + if ((ring->adapter->itr_setting & 3) && ring->set_itr) { + switch (hw->mac.type) { + case e1000_82576: + wr32(ring->itr_register, + ring->itr_val | + 0x80000000); + break; + default: + wr32(ring->itr_register, + ring->itr_val | + (ring->itr_val << 16)); + break; + } + ring->set_itr = 0; + } +} + static irqreturn_t igb_msix_rx(int irq, void *data) { struct igb_ring *rx_ring = data; struct igb_adapter *adapter = rx_ring->adapter; - struct e1000_hw *hw = &adapter->hw; /* Write the ITR value calculated at the end of the * previous interrupt. */ - if (adapter->set_itr) { - wr32(rx_ring->itr_register, - 1000000000 / (rx_ring->itr_val * 256)); - adapter->set_itr = 0; - } + igb_write_itr(rx_ring); if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) __netif_rx_schedule(adapter->netdev, &rx_ring->napi); @@ -3493,13 +3535,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) /* read ICR disables interrupts using IAM */ u32 icr = rd32(E1000_ICR); - /* Write the ITR value calculated at the end of the - * previous interrupt. - */ - if (adapter->set_itr) { - wr32(E1000_ITR, 1000000000 / (adapter->itr * 256)); - adapter->set_itr = 0; - } + igb_write_itr(adapter->rx_ring); if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { hw->mac.get_link_status = 1; @@ -3529,13 +3565,7 @@ static irqreturn_t igb_intr(int irq, void *data) if (!icr) return IRQ_NONE; /* Not our interrupt */ - /* Write the ITR value calculated at the end of the - * previous interrupt. 
- */ - if (adapter->set_itr) { - wr32(E1000_ITR, 1000000000 / (adapter->itr * 256)); - adapter->set_itr = 0; - } + igb_write_itr(adapter->rx_ring); /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt */ @@ -3585,7 +3615,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if ((tx_clean_complete && (work_done < budget)) || !netif_running(netdev)) { if (adapter->itr_setting & 3) - igb_set_itr(adapter, E1000_ITR, false); + igb_set_itr(adapter); netif_rx_complete(netdev, napi); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); @@ -3619,15 +3649,11 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) quit_polling: netif_rx_complete(netdev, napi); - wr32(E1000_EIMS, rx_ring->eims_value); - if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust && - (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) { - int mean_size = rx_ring->total_bytes / - rx_ring->total_packets; - if (mean_size < IGB_DYN_ITR_LENGTH_LOW) - igb_raise_rx_eitr(adapter, rx_ring); - else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH) - igb_lower_rx_eitr(adapter, rx_ring); + if (adapter->itr_setting & 3) { + if (adapter->num_rx_queues == 1) + igb_set_itr(adapter); + else + igb_update_ring_itr(rx_ring); } if (!test_bit(__IGB_DOWN, &adapter->state)) @@ -3972,7 +3998,6 @@ send_up: dev_kfree_skb_irq(skb); goto next_desc; } - rx_ring->no_itr_adjust |= (staterr & E1000_RXD_STAT_DYNINT); total_bytes += skb->len; total_packets++; -- cgit v1.2.1 From 5e4fe5c45ac6dda534c362e29bd4eb39f4d9cba8 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Tue, 8 Jul 2008 17:10:42 +1000 Subject: virtio_net: Set VIRTIO_NET_F_GUEST_CSUM feature We can handle receiving partial csums, so set the appropriate feature bit. Signed-off-by: Mark McLoughlin Signed-off-by: Rusty Russell Signed-off-by: Jeff Garzik --- drivers/net/virtio_net.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4452306d5328..c28d7cb2035b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -550,7 +550,8 @@ static struct virtio_device_id id_table[] = { }; static unsigned int features[] = { - VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, }; -- cgit v1.2.1 From 9439f749441f3a7c2c8ef9e32b698cfe9ed60f48 Mon Sep 17 00:00:00 2001 From: Karen Xie Date: Tue, 8 Jul 2008 09:32:34 -0700 Subject: cxgb3 - Add iscsi support Add iSCSI (S3xx) support. 
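One detail worth noting in the offload hunk below is how the per-connection DDP page-size factors are programmed: four 4-bit factors packed into a single 32-bit register, one per byte lane. A standalone sketch of that packing; the helper name and the sample factor values are invented for illustration, only the bit layout mirrors the ULP_ISCSI_SET_PARAMS loop in the patch.

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Pack four 4-bit page-size factors into one 32-bit register value,
     * factor i occupying bits [8*i + 3 : 8*i].
     */
    static uint32_t pack_pgsz_factors(const uint8_t factor[4])
    {
        uint32_t val = 0;
        int i;

        for (i = 0; i < 4; i++)
            val |= (uint32_t)(factor[i] & 0xF) << (8 * i);
        return val;
    }

    int main(void)
    {
        /* hypothetical factors for four DDP page sizes */
        uint8_t factors[4] = { 0, 4, 6, 8 };

        printf("packed page-size register value = 0x%08x\n",
               pack_pgsz_factors(factors));
        return 0;
    }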
Signed-off-by: Karen Xie Signed-off-by: Jeff Garzik --- drivers/net/cxgb3/cxgb3_ctl_defs.h | 5 +---- drivers/net/cxgb3/cxgb3_offload.c | 11 +++++++++++ drivers/net/cxgb3/regs.h | 10 ++++++---- drivers/net/cxgb3/t3_cpl.h | 40 ++++++++++++++++++++++++++++++++++++++ drivers/net/cxgb3/t3cdev.h | 1 + 5 files changed, 59 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h index 6c4f32066919..d38e6cc92668 100644 --- a/drivers/net/cxgb3/cxgb3_ctl_defs.h +++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h @@ -110,10 +110,7 @@ struct ulp_iscsi_info { unsigned int llimit; unsigned int ulimit; unsigned int tagmask; - unsigned int pgsz3; - unsigned int pgsz2; - unsigned int pgsz1; - unsigned int pgsz0; + u8 pgsz_factor[4]; unsigned int max_rxsz; unsigned int max_txsz; struct pci_dev *pdev; diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index ae6ff5df779c..c69f4c0187d9 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -207,6 +207,17 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, break; case ULP_ISCSI_SET_PARAMS: t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); + /* set MaxRxData and MaxCoalesceSize to 16224 */ + t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60); + /* program the ddp page sizes */ + { + int i; + unsigned int val = 0; + for (i = 0; i < 4; i++) + val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); + if (val) + t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); + } break; default: ret = -EOPNOTSUPP; diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index 567178879345..4bda27c551c9 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -1517,16 +1517,18 @@ #define A_ULPRX_ISCSI_TAGMASK 0x514 -#define S_HPZ0 0 -#define M_HPZ0 0xf -#define V_HPZ0(x) ((x) << S_HPZ0) -#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) +#define A_ULPRX_ISCSI_PSZ 0x518 #define A_ULPRX_TDDP_LLIMIT 0x51c #define A_ULPRX_TDDP_ULIMIT 0x520 #define A_ULPRX_TDDP_PSZ 0x528 +#define S_HPZ0 0 +#define M_HPZ0 0xf +#define V_HPZ0(x) ((x) << S_HPZ0) +#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) + #define A_ULPRX_STAG_LLIMIT 0x52c #define A_ULPRX_STAG_ULIMIT 0x530 diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h index a666c5d51cc0..917970ed24a1 100644 --- a/drivers/net/cxgb3/t3_cpl.h +++ b/drivers/net/cxgb3/t3_cpl.h @@ -191,6 +191,9 @@ union opcode_tid { #define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) #define G_TID(x) ((x) & 0xFFFFFF) +#define S_QNUM 0 +#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF) + #define S_HASHTYPE 22 #define M_HASHTYPE 0x3 #define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE) @@ -779,6 +782,12 @@ struct tx_data_wr { __be32 param; }; +/* tx_data_wr.flags fields */ +#define S_TX_ACK_PAGES 21 +#define M_TX_ACK_PAGES 0x7 +#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES) +#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES) + /* tx_data_wr.param fields */ #define S_TX_PORT 0 #define M_TX_PORT 0x7 @@ -1452,4 +1461,35 @@ struct cpl_rdma_terminate { #define M_TERM_TID 0xFFFFF #define V_TERM_TID(x) ((x) << S_TERM_TID) #define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID) + +/* ULP_TX opcodes */ +enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 }; + +#define S_ULPTX_CMD 28 +#define M_ULPTX_CMD 0xF +#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) + +#define S_ULPTX_NFLITS 0 +#define M_ULPTX_NFLITS 0xFF +#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS) + +struct 
ulp_mem_io { + WR_HDR; + __be32 cmd_lock_addr; + __be32 len; +}; + +/* ulp_mem_io.cmd_lock_addr fields */ +#define S_ULP_MEMIO_ADDR 0 +#define M_ULP_MEMIO_ADDR 0x7FFFFFF +#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR) +#define S_ULP_MEMIO_LOCK 27 +#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK) +#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U) + +/* ulp_mem_io.len fields */ +#define S_ULP_MEMIO_DATA_LEN 28 +#define M_ULP_MEMIO_DATA_LEN 0xF +#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN) + #endif /* T3_CPL_H */ diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h index 8556628fd5af..0a21cfbd2b21 100644 --- a/drivers/net/cxgb3/t3cdev.h +++ b/drivers/net/cxgb3/t3cdev.h @@ -64,6 +64,7 @@ struct t3cdev { void *l3opt; /* optional layer 3 data */ void *l4opt; /* optional layer 4 data */ void *ulp; /* ulp stuff */ + void *ulp_iscsi; /* ulp iscsi */ }; #endif /* _T3CDEV_H_ */ -- cgit v1.2.1 From e9911c2c8f87cfda47109c42e399fa487117095c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 7 Jul 2008 16:51:45 +0200 Subject: Fix missing exports for net/phy/mdio-bitbang.c {alloc,free}_mdio_bitbang() are not exported while they are used in mdio-ofgpio driver. Signed-off-by: Takashi Iwai Signed-off-by: Jeff Garzik --- drivers/net/phy/mdio-bitbang.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 2747b1f89ffe..c01b78013ddc 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -177,6 +177,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) return bus; } +EXPORT_SYMBOL(alloc_mdio_bitbang); void free_mdio_bitbang(struct mii_bus *bus) { @@ -185,5 +186,6 @@ void free_mdio_bitbang(struct mii_bus *bus) module_put(ctrl->ops->owner); kfree(bus); } +EXPORT_SYMBOL(free_mdio_bitbang); MODULE_LICENSE("GPL"); -- cgit v1.2.1 From 67fbbe1551b24d1bcab8478407f9b8c713d5596e Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 9 Jul 2008 12:38:43 +0100 Subject: SAA9730: Remove driver The only user of the board, the extremly dated and rare MIPS Atlas board, has been removed, so this driver can go, too. Signed-off-by: Ralf Baechle Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 9 - drivers/net/Makefile | 1 - drivers/net/saa9730.c | 1139 ------------------------------------------------- drivers/net/saa9730.h | 384 ----------------- 4 files changed, 1533 deletions(-) delete mode 100644 drivers/net/saa9730.c delete mode 100644 drivers/net/saa9730.h (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 44d1c835128a..9490cb172330 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1694,15 +1694,6 @@ config VIA_RHINE_MMIO If unsure, say Y. -config LAN_SAA9730 - bool "Philips SAA9730 Ethernet support" - depends on NET_PCI && PCI && MIPS_ATLAS - help - The SAA9730 is a combined multimedia and peripheral controller used - in thin clients, Internet access terminals, and diskless - workstations. - See . 
- config SC92031 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)" depends on NET_PCI && PCI && EXPERIMENTAL diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 4beb043e09e6..3292d0af59c3 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -166,7 +166,6 @@ obj-$(CONFIG_EEXPRESS_PRO) += eepro.o obj-$(CONFIG_8139CP) += 8139cp.o obj-$(CONFIG_8139TOO) += 8139too.o obj-$(CONFIG_ZNET) += znet.o -obj-$(CONFIG_LAN_SAA9730) += saa9730.o obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_DEPCA) += depca.o obj-$(CONFIG_EWRK3) += ewrk3.o diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c deleted file mode 100644 index c65199df8a7f..000000000000 --- a/drivers/net/saa9730.c +++ /dev/null @@ -1,1139 +0,0 @@ -/* - * Copyright (C) 2000, 2005 MIPS Technologies, Inc. All rights reserved. - * Authors: Carsten Langgaard - * Maciej W. Rozycki - * Copyright (C) 2004 Ralf Baechle - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * SAA9730 ethernet driver. - * - * Changes: - * Angelo Dell'Aera : Conversion to the new PCI API - * (pci_driver). - * Conversion to spinlocks. - * Error handling fixes. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "saa9730.h" - -#ifdef LAN_SAA9730_DEBUG -int lan_saa9730_debug = LAN_SAA9730_DEBUG; -#else -int lan_saa9730_debug; -#endif - -#define DRV_MODULE_NAME "saa9730" - -static struct pci_device_id saa9730_pci_tbl[] = { - { PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA9730, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, saa9730_pci_tbl); - -/* Non-zero only if the current card is a PCI with BIOS-set IRQ. 
*/ -static unsigned int pci_irq_line; - -static void evm_saa9730_enable_lan_int(struct lan_saa9730_private *lp) -{ - writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT, - &lp->evm_saa9730_regs->InterruptBlock1); - writel(readl(&lp->evm_saa9730_regs->InterruptStatus1) | EVM_LAN_INT, - &lp->evm_saa9730_regs->InterruptStatus1); - writel(readl(&lp->evm_saa9730_regs->InterruptEnable1) | EVM_LAN_INT | - EVM_MASTER_EN, &lp->evm_saa9730_regs->InterruptEnable1); -} - -static void evm_saa9730_disable_lan_int(struct lan_saa9730_private *lp) -{ - writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT, - &lp->evm_saa9730_regs->InterruptBlock1); - writel(readl(&lp->evm_saa9730_regs->InterruptEnable1) & ~EVM_LAN_INT, - &lp->evm_saa9730_regs->InterruptEnable1); -} - -static void evm_saa9730_clear_lan_int(struct lan_saa9730_private *lp) -{ - writel(EVM_LAN_INT, &lp->evm_saa9730_regs->InterruptStatus1); -} - -static void evm_saa9730_block_lan_int(struct lan_saa9730_private *lp) -{ - writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT, - &lp->evm_saa9730_regs->InterruptBlock1); -} - -static void evm_saa9730_unblock_lan_int(struct lan_saa9730_private *lp) -{ - writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT, - &lp->evm_saa9730_regs->InterruptBlock1); -} - -static void __used show_saa9730_regs(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - int i, j; - - printk("TxmBufferA = %p\n", lp->TxmBuffer[0][0]); - printk("TxmBufferB = %p\n", lp->TxmBuffer[1][0]); - printk("RcvBufferA = %p\n", lp->RcvBuffer[0][0]); - printk("RcvBufferB = %p\n", lp->RcvBuffer[1][0]); - - for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { - for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) { - printk("TxmBuffer[%d][%d] = %x\n", i, j, - le32_to_cpu(*(unsigned int *) - lp->TxmBuffer[i][j])); - } - } - for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { - for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) { - printk("RcvBuffer[%d][%d] = %x\n", i, j, - le32_to_cpu(*(unsigned int *) - lp->RcvBuffer[i][j])); - } - } - printk("lp->evm_saa9730_regs->InterruptBlock1 = %x\n", - readl(&lp->evm_saa9730_regs->InterruptBlock1)); - printk("lp->evm_saa9730_regs->InterruptStatus1 = %x\n", - readl(&lp->evm_saa9730_regs->InterruptStatus1)); - printk("lp->evm_saa9730_regs->InterruptEnable1 = %x\n", - readl(&lp->evm_saa9730_regs->InterruptEnable1)); - printk("lp->lan_saa9730_regs->Ok2Use = %x\n", - readl(&lp->lan_saa9730_regs->Ok2Use)); - printk("lp->NextTxmBufferIndex = %x\n", lp->NextTxmBufferIndex); - printk("lp->NextTxmPacketIndex = %x\n", lp->NextTxmPacketIndex); - printk("lp->PendingTxmBufferIndex = %x\n", - lp->PendingTxmBufferIndex); - printk("lp->PendingTxmPacketIndex = %x\n", - lp->PendingTxmPacketIndex); - printk("lp->lan_saa9730_regs->LanDmaCtl = %x\n", - readl(&lp->lan_saa9730_regs->LanDmaCtl)); - printk("lp->lan_saa9730_regs->DmaStatus = %x\n", - readl(&lp->lan_saa9730_regs->DmaStatus)); - printk("lp->lan_saa9730_regs->CamCtl = %x\n", - readl(&lp->lan_saa9730_regs->CamCtl)); - printk("lp->lan_saa9730_regs->TxCtl = %x\n", - readl(&lp->lan_saa9730_regs->TxCtl)); - printk("lp->lan_saa9730_regs->TxStatus = %x\n", - readl(&lp->lan_saa9730_regs->TxStatus)); - printk("lp->lan_saa9730_regs->RxCtl = %x\n", - readl(&lp->lan_saa9730_regs->RxCtl)); - printk("lp->lan_saa9730_regs->RxStatus = %x\n", - readl(&lp->lan_saa9730_regs->RxStatus)); - - for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) { - writel(i, &lp->lan_saa9730_regs->CamAddress); - printk("lp->lan_saa9730_regs->CamData = %x\n", - 
readl(&lp->lan_saa9730_regs->CamData)); - } - - printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets); - printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors); - printk("dev->stats.tx_aborted_errors = %lx\n", - dev->stats.tx_aborted_errors); - printk("dev->stats.tx_window_errors = %lx\n", - dev->stats.tx_window_errors); - printk("dev->stats.tx_carrier_errors = %lx\n", - dev->stats.tx_carrier_errors); - printk("dev->stats.tx_fifo_errors = %lx\n", - dev->stats.tx_fifo_errors); - printk("dev->stats.tx_heartbeat_errors = %lx\n", - dev->stats.tx_heartbeat_errors); - printk("dev->stats.collisions = %lx\n", dev->stats.collisions); - - printk("dev->stats.rx_packets = %lx\n", dev->stats.rx_packets); - printk("dev->stats.rx_errors = %lx\n", dev->stats.rx_errors); - printk("dev->stats.rx_dropped = %lx\n", dev->stats.rx_dropped); - printk("dev->stats.rx_crc_errors = %lx\n", dev->stats.rx_crc_errors); - printk("dev->stats.rx_frame_errors = %lx\n", - dev->stats.rx_frame_errors); - printk("dev->stats.rx_fifo_errors = %lx\n", - dev->stats.rx_fifo_errors); - printk("dev->stats.rx_length_errors = %lx\n", - dev->stats.rx_length_errors); - - printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n", - readl(&lp->lan_saa9730_regs->DebugPCIMasterAddr)); - printk("lp->lan_saa9730_regs->DebugLanTxStateMachine = %x\n", - readl(&lp->lan_saa9730_regs->DebugLanTxStateMachine)); - printk("lp->lan_saa9730_regs->DebugLanRxStateMachine = %x\n", - readl(&lp->lan_saa9730_regs->DebugLanRxStateMachine)); - printk("lp->lan_saa9730_regs->DebugLanTxFifoPointers = %x\n", - readl(&lp->lan_saa9730_regs->DebugLanTxFifoPointers)); - printk("lp->lan_saa9730_regs->DebugLanRxFifoPointers = %x\n", - readl(&lp->lan_saa9730_regs->DebugLanRxFifoPointers)); - printk("lp->lan_saa9730_regs->DebugLanCtlStateMachine = %x\n", - readl(&lp->lan_saa9730_regs->DebugLanCtlStateMachine)); -} - -static void lan_saa9730_buffer_init(struct lan_saa9730_private *lp) -{ - int i, j; - - /* Init RX buffers */ - for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { - for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) { - *(unsigned int *) lp->RcvBuffer[i][j] = - cpu_to_le32(RXSF_READY << - RX_STAT_CTL_OWNER_SHF); - } - } - - /* Init TX buffers */ - for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { - for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) { - *(unsigned int *) lp->TxmBuffer[i][j] = - cpu_to_le32(TXSF_EMPTY << - TX_STAT_CTL_OWNER_SHF); - } - } -} - -static void lan_saa9730_free_buffers(struct pci_dev *pdev, - struct lan_saa9730_private *lp) -{ - pci_free_consistent(pdev, lp->buffer_size, lp->buffer_start, - lp->dma_addr); -} - -static int lan_saa9730_allocate_buffers(struct pci_dev *pdev, - struct lan_saa9730_private *lp) -{ - void *Pa; - unsigned int i, j, rxoffset, txoffset; - int ret; - - /* Initialize buffer space */ - lp->DmaRcvPackets = LAN_SAA9730_RCV_Q_SIZE; - lp->DmaTxmPackets = LAN_SAA9730_TXM_Q_SIZE; - - /* Initialize Rx Buffer Index */ - lp->NextRcvPacketIndex = 0; - lp->NextRcvBufferIndex = 0; - - /* Set current buffer index & next available packet index */ - lp->NextTxmPacketIndex = 0; - lp->NextTxmBufferIndex = 0; - lp->PendingTxmPacketIndex = 0; - lp->PendingTxmBufferIndex = 0; - - /* - * Allocate all RX and TX packets in one chunk. - * The Rx and Tx packets must be PACKET_SIZE aligned. 
- */ - lp->buffer_size = ((LAN_SAA9730_RCV_Q_SIZE + LAN_SAA9730_TXM_Q_SIZE) * - LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_BUFFERS) + - LAN_SAA9730_PACKET_SIZE; - lp->buffer_start = pci_alloc_consistent(pdev, lp->buffer_size, - &lp->dma_addr); - if (!lp->buffer_start) { - ret = -ENOMEM; - goto out; - } - - Pa = (void *)ALIGN((unsigned long)lp->buffer_start, - LAN_SAA9730_PACKET_SIZE); - - rxoffset = Pa - lp->buffer_start; - - /* Init RX buffers */ - for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { - for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) { - *(unsigned int *) Pa = - cpu_to_le32(RXSF_READY << - RX_STAT_CTL_OWNER_SHF); - lp->RcvBuffer[i][j] = Pa; - Pa += LAN_SAA9730_PACKET_SIZE; - } - } - - txoffset = Pa - lp->buffer_start; - - /* Init TX buffers */ - for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { - for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) { - *(unsigned int *) Pa = - cpu_to_le32(TXSF_EMPTY << - TX_STAT_CTL_OWNER_SHF); - lp->TxmBuffer[i][j] = Pa; - Pa += LAN_SAA9730_PACKET_SIZE; - } - } - - /* - * Set rx buffer A and rx buffer B to point to the first two buffer - * spaces. - */ - writel(lp->dma_addr + rxoffset, &lp->lan_saa9730_regs->RxBuffA); - writel(lp->dma_addr + rxoffset + - LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_RCV_Q_SIZE, - &lp->lan_saa9730_regs->RxBuffB); - - /* - * Set txm_buf_a and txm_buf_b to point to the first two buffer - * space - */ - writel(lp->dma_addr + txoffset, - &lp->lan_saa9730_regs->TxBuffA); - writel(lp->dma_addr + txoffset + - LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_TXM_Q_SIZE, - &lp->lan_saa9730_regs->TxBuffB); - - /* Set packet number */ - writel((lp->DmaRcvPackets << PK_COUNT_RX_A_SHF) | - (lp->DmaRcvPackets << PK_COUNT_RX_B_SHF) | - (lp->DmaTxmPackets << PK_COUNT_TX_A_SHF) | - (lp->DmaTxmPackets << PK_COUNT_TX_B_SHF), - &lp->lan_saa9730_regs->PacketCount); - - return 0; - -out: - return ret; -} - -static int lan_saa9730_cam_load(struct lan_saa9730_private *lp) -{ - unsigned int i; - unsigned char *NetworkAddress; - - NetworkAddress = (unsigned char *) &lp->PhysicalAddress[0][0]; - - for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) { - /* First set address to where data is written */ - writel(i, &lp->lan_saa9730_regs->CamAddress); - writel((NetworkAddress[0] << 24) | (NetworkAddress[1] << 16) | - (NetworkAddress[2] << 8) | NetworkAddress[3], - &lp->lan_saa9730_regs->CamData); - NetworkAddress += 4; - } - return 0; -} - -static int lan_saa9730_cam_init(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - unsigned int i; - - /* Copy MAC-address into all entries. */ - for (i = 0; i < LAN_SAA9730_CAM_ENTRIES; i++) { - memcpy((unsigned char *) lp->PhysicalAddress[i], - (unsigned char *) dev->dev_addr, 6); - } - - return 0; -} - -static int lan_saa9730_mii_init(struct lan_saa9730_private *lp) -{ - int i, l; - - /* Check link status, spin here till station is not busy. */ - i = 0; - while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) & MD_CA_BUSY) { - i++; - if (i > 100) { - printk("Error: lan_saa9730_mii_init: timeout\n"); - return -1; - } - mdelay(1); /* wait 1 ms. */ - } - - /* Now set the control and address register. */ - writel(MD_CA_BUSY | PHY_STATUS | PHY_ADDRESS << MD_CA_PHY_SHF, - &lp->lan_saa9730_regs->StationMgmtCtl); - - /* check link status, spin here till station is not busy */ - i = 0; - while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) & MD_CA_BUSY) { - i++; - if (i > 100) { - printk("Error: lan_saa9730_mii_init: timeout\n"); - return -1; - } - mdelay(1); /* wait 1 ms. */ - } - - /* Wait for 1 ms. 
*/ - mdelay(1); - - /* Check the link status. */ - if (readl(&lp->lan_saa9730_regs->StationMgmtData) & - PHY_STATUS_LINK_UP) { - /* Link is up. */ - return 0; - } else { - /* Link is down, reset the PHY first. */ - - /* set PHY address = 'CONTROL' */ - writel(PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR | PHY_CONTROL, - &lp->lan_saa9730_regs->StationMgmtCtl); - - /* Wait for 1 ms. */ - mdelay(1); - - /* set 'CONTROL' = force reset and renegotiate */ - writel(PHY_CONTROL_RESET | PHY_CONTROL_AUTO_NEG | - PHY_CONTROL_RESTART_AUTO_NEG, - &lp->lan_saa9730_regs->StationMgmtData); - - /* Wait for 50 ms. */ - mdelay(50); - - /* set 'BUSY' to start operation */ - writel(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR | - PHY_CONTROL, &lp->lan_saa9730_regs->StationMgmtCtl); - - /* await completion */ - i = 0; - while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) & - MD_CA_BUSY) { - i++; - if (i > 100) { - printk - ("Error: lan_saa9730_mii_init: timeout\n"); - return -1; - } - mdelay(1); /* wait 1 ms. */ - } - - /* Wait for 1 ms. */ - mdelay(1); - - for (l = 0; l < 2; l++) { - /* set PHY address = 'STATUS' */ - writel(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | - PHY_STATUS, - &lp->lan_saa9730_regs->StationMgmtCtl); - - /* await completion */ - i = 0; - while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) & - MD_CA_BUSY) { - i++; - if (i > 100) { - printk - ("Error: lan_saa9730_mii_init: timeout\n"); - return -1; - } - mdelay(1); /* wait 1 ms. */ - } - - /* wait for 3 sec. */ - mdelay(3000); - - /* check the link status */ - if (readl(&lp->lan_saa9730_regs->StationMgmtData) & - PHY_STATUS_LINK_UP) { - /* link is up */ - break; - } - } - } - - return 0; -} - -static int lan_saa9730_control_init(struct lan_saa9730_private *lp) -{ - /* Initialize DMA control register. */ - writel((LANMB_ANY << DMA_CTL_MAX_XFER_SHF) | - (LANEND_LITTLE << DMA_CTL_ENDIAN_SHF) | - (LAN_SAA9730_RCV_Q_INT_THRESHOLD << DMA_CTL_RX_INT_COUNT_SHF) - | DMA_CTL_RX_INT_TO_EN | DMA_CTL_RX_INT_EN | - DMA_CTL_MAC_RX_INT_EN | DMA_CTL_MAC_TX_INT_EN, - &lp->lan_saa9730_regs->LanDmaCtl); - - /* Initial MAC control register. */ - writel((MACCM_MII << MAC_CONTROL_CONN_SHF) | MAC_CONTROL_FULL_DUP, - &lp->lan_saa9730_regs->MacCtl); - - /* Initialize CAM control register. */ - writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_BROAD_ACC, - &lp->lan_saa9730_regs->CamCtl); - - /* - * Initialize CAM enable register, only turn on first entry, should - * contain own addr. - */ - writel(0x0001, &lp->lan_saa9730_regs->CamEnable); - - /* Initialize Tx control register */ - writel(TX_CTL_EN_COMP, &lp->lan_saa9730_regs->TxCtl); - - /* Initialize Rcv control register */ - writel(RX_CTL_STRIP_CRC, &lp->lan_saa9730_regs->RxCtl); - - /* Reset DMA engine */ - writel(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest); - - return 0; -} - -static int lan_saa9730_stop(struct lan_saa9730_private *lp) -{ - int i; - - /* Stop DMA first */ - writel(readl(&lp->lan_saa9730_regs->LanDmaCtl) & - ~(DMA_CTL_EN_TX_DMA | DMA_CTL_EN_RX_DMA), - &lp->lan_saa9730_regs->LanDmaCtl); - - /* Set the SW Reset bits in DMA and MAC control registers */ - writel(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest); - writel(readl(&lp->lan_saa9730_regs->MacCtl) | MAC_CONTROL_RESET, - &lp->lan_saa9730_regs->MacCtl); - - /* - * Wait for MAC reset to have finished. The reset bit is auto cleared - * when the reset is done. 
- */ - i = 0; - while (readl(&lp->lan_saa9730_regs->MacCtl) & MAC_CONTROL_RESET) { - i++; - if (i > 100) { - printk - ("Error: lan_sa9730_stop: MAC reset timeout\n"); - return -1; - } - mdelay(1); /* wait 1 ms. */ - } - - return 0; -} - -static int lan_saa9730_dma_init(struct lan_saa9730_private *lp) -{ - /* Stop lan controller. */ - lan_saa9730_stop(lp); - - writel(LAN_SAA9730_DEFAULT_TIME_OUT_CNT, - &lp->lan_saa9730_regs->Timeout); - - return 0; -} - -static int lan_saa9730_start(struct lan_saa9730_private *lp) -{ - lan_saa9730_buffer_init(lp); - - /* Initialize Rx Buffer Index */ - lp->NextRcvPacketIndex = 0; - lp->NextRcvBufferIndex = 0; - - /* Set current buffer index & next available packet index */ - lp->NextTxmPacketIndex = 0; - lp->NextTxmBufferIndex = 0; - lp->PendingTxmPacketIndex = 0; - lp->PendingTxmBufferIndex = 0; - - writel(readl(&lp->lan_saa9730_regs->LanDmaCtl) | DMA_CTL_EN_TX_DMA | - DMA_CTL_EN_RX_DMA, &lp->lan_saa9730_regs->LanDmaCtl); - - /* For Tx, turn on MAC then DMA */ - writel(readl(&lp->lan_saa9730_regs->TxCtl) | TX_CTL_TX_EN, - &lp->lan_saa9730_regs->TxCtl); - - /* For Rx, turn on DMA then MAC */ - writel(readl(&lp->lan_saa9730_regs->RxCtl) | RX_CTL_RX_EN, - &lp->lan_saa9730_regs->RxCtl); - - /* Set Ok2Use to let hardware own the buffers. */ - writel(OK2USE_RX_A | OK2USE_RX_B, &lp->lan_saa9730_regs->Ok2Use); - - return 0; -} - -static int lan_saa9730_restart(struct lan_saa9730_private *lp) -{ - lan_saa9730_stop(lp); - lan_saa9730_start(lp); - - return 0; -} - -static int lan_saa9730_tx(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - unsigned int *pPacket; - unsigned int tx_status; - - if (lan_saa9730_debug > 5) - printk("lan_saa9730_tx interrupt\n"); - - /* Clear interrupt. */ - writel(DMA_STATUS_MAC_TX_INT, &lp->lan_saa9730_regs->DmaStatus); - - while (1) { - pPacket = lp->TxmBuffer[lp->PendingTxmBufferIndex] - [lp->PendingTxmPacketIndex]; - - /* Get status of first packet transmitted. */ - tx_status = le32_to_cpu(*pPacket); - - /* Check ownership. */ - if ((tx_status & TX_STAT_CTL_OWNER_MSK) != - (TXSF_HWDONE << TX_STAT_CTL_OWNER_SHF)) break; - - /* Check for error. */ - if (tx_status & TX_STAT_CTL_ERROR_MSK) { - if (lan_saa9730_debug > 1) - printk("lan_saa9730_tx: tx error = %x\n", - tx_status); - - dev->stats.tx_errors++; - if (tx_status & - (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF)) - dev->stats.tx_aborted_errors++; - if (tx_status & - (TX_STATUS_LATE_COLL << TX_STAT_CTL_STATUS_SHF)) - dev->stats.tx_window_errors++; - if (tx_status & - (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF)) - dev->stats.tx_carrier_errors++; - if (tx_status & - (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF)) - dev->stats.tx_fifo_errors++; - if (tx_status & - (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF)) - dev->stats.tx_heartbeat_errors++; - - dev->stats.collisions += - tx_status & TX_STATUS_TX_COLL_MSK; - } - - /* Free buffer. */ - *pPacket = - cpu_to_le32(TXSF_EMPTY << TX_STAT_CTL_OWNER_SHF); - - /* Update pending index pointer. */ - lp->PendingTxmPacketIndex++; - if (lp->PendingTxmPacketIndex >= LAN_SAA9730_TXM_Q_SIZE) { - lp->PendingTxmPacketIndex = 0; - lp->PendingTxmBufferIndex ^= 1; - } - } - - /* The tx buffer is no longer full. 
*/ - netif_wake_queue(dev); - - return 0; -} - -static int lan_saa9730_rx(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - int len = 0; - struct sk_buff *skb = 0; - unsigned int rx_status; - int BufferIndex; - int PacketIndex; - unsigned int *pPacket; - unsigned char *pData; - - if (lan_saa9730_debug > 5) - printk("lan_saa9730_rx interrupt\n"); - - /* Clear receive interrupts. */ - writel(DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT | - DMA_STATUS_RX_TO_INT, &lp->lan_saa9730_regs->DmaStatus); - - /* Address next packet */ - BufferIndex = lp->NextRcvBufferIndex; - PacketIndex = lp->NextRcvPacketIndex; - pPacket = lp->RcvBuffer[BufferIndex][PacketIndex]; - rx_status = le32_to_cpu(*pPacket); - - /* Process each packet. */ - while ((rx_status & RX_STAT_CTL_OWNER_MSK) == - (RXSF_HWDONE << RX_STAT_CTL_OWNER_SHF)) { - /* Check the rx status. */ - if (rx_status & (RX_STATUS_GOOD << RX_STAT_CTL_STATUS_SHF)) { - /* Received packet is good. */ - len = (rx_status & RX_STAT_CTL_LENGTH_MSK) >> - RX_STAT_CTL_LENGTH_SHF; - - pData = (unsigned char *) pPacket; - pData += 4; - skb = dev_alloc_skb(len + 2); - if (skb == 0) { - printk - ("%s: Memory squeeze, deferring packet.\n", - dev->name); - dev->stats.rx_dropped++; - } else { - dev->stats.rx_bytes += len; - dev->stats.rx_packets++; - skb_reserve(skb, 2); /* 16 byte align */ - skb_put(skb, len); /* make room */ - skb_copy_to_linear_data(skb, - (unsigned char *) pData, - len); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->last_rx = jiffies; - } - } else { - /* We got an error packet. */ - if (lan_saa9730_debug > 2) - printk - ("lan_saa9730_rx: We got an error packet = %x\n", - rx_status); - - dev->stats.rx_errors++; - if (rx_status & - (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF)) - dev->stats.rx_crc_errors++; - if (rx_status & - (RX_STATUS_ALIGN_ERR << RX_STAT_CTL_STATUS_SHF)) - dev->stats.rx_frame_errors++; - if (rx_status & - (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF)) - dev->stats.rx_fifo_errors++; - if (rx_status & - (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF)) - dev->stats.rx_length_errors++; - } - - /* Indicate we have processed the buffer. */ - *pPacket = cpu_to_le32(RXSF_READY << RX_STAT_CTL_OWNER_SHF); - - /* Make sure A or B is available to hardware as appropriate. */ - writel(BufferIndex ? OK2USE_RX_B : OK2USE_RX_A, - &lp->lan_saa9730_regs->Ok2Use); - - /* Go to next packet in sequence. */ - lp->NextRcvPacketIndex++; - if (lp->NextRcvPacketIndex >= LAN_SAA9730_RCV_Q_SIZE) { - lp->NextRcvPacketIndex = 0; - lp->NextRcvBufferIndex ^= 1; - } - - /* Address next packet */ - BufferIndex = lp->NextRcvBufferIndex; - PacketIndex = lp->NextRcvPacketIndex; - pPacket = lp->RcvBuffer[BufferIndex][PacketIndex]; - rx_status = le32_to_cpu(*pPacket); - } - - return 0; -} - -static irqreturn_t lan_saa9730_interrupt(const int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct lan_saa9730_private *lp = netdev_priv(dev); - - if (lan_saa9730_debug > 5) - printk("lan_saa9730_interrupt\n"); - - /* Disable the EVM LAN interrupt. */ - evm_saa9730_block_lan_int(lp); - - /* Clear the EVM LAN interrupt. */ - evm_saa9730_clear_lan_int(lp); - - /* Service pending transmit interrupts. */ - if (readl(&lp->lan_saa9730_regs->DmaStatus) & DMA_STATUS_MAC_TX_INT) - lan_saa9730_tx(dev); - - /* Service pending receive interrupts. */ - if (readl(&lp->lan_saa9730_regs->DmaStatus) & - (DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT | - DMA_STATUS_RX_TO_INT)) lan_saa9730_rx(dev); - - /* Enable the EVM LAN interrupt. 
*/ - evm_saa9730_unblock_lan_int(lp); - - return IRQ_HANDLED; -} - -static int lan_saa9730_open(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - - /* Associate IRQ with lan_saa9730_interrupt */ - if (request_irq(dev->irq, &lan_saa9730_interrupt, 0, "SAA9730 Eth", - dev)) { - printk("lan_saa9730_open: Can't get irq %d\n", dev->irq); - return -EAGAIN; - } - - /* Enable the Lan interrupt in the event manager. */ - evm_saa9730_enable_lan_int(lp); - - /* Start the LAN controller */ - if (lan_saa9730_start(lp)) - return -1; - - netif_start_queue(dev); - - return 0; -} - -static int lan_saa9730_write(struct lan_saa9730_private *lp, - struct sk_buff *skb, int skblen) -{ - unsigned char *pbData = skb->data; - unsigned int len = skblen; - unsigned char *pbPacketData; - unsigned int tx_status; - int BufferIndex; - int PacketIndex; - - if (lan_saa9730_debug > 5) - printk("lan_saa9730_write: skb=%p\n", skb); - - BufferIndex = lp->NextTxmBufferIndex; - PacketIndex = lp->NextTxmPacketIndex; - - tx_status = le32_to_cpu(*(unsigned int *)lp->TxmBuffer[BufferIndex] - [PacketIndex]); - if ((tx_status & TX_STAT_CTL_OWNER_MSK) != - (TXSF_EMPTY << TX_STAT_CTL_OWNER_SHF)) { - if (lan_saa9730_debug > 4) - printk - ("lan_saa9730_write: Tx buffer not available: tx_status = %x\n", - tx_status); - return -1; - } - - lp->NextTxmPacketIndex++; - if (lp->NextTxmPacketIndex >= LAN_SAA9730_TXM_Q_SIZE) { - lp->NextTxmPacketIndex = 0; - lp->NextTxmBufferIndex ^= 1; - } - - pbPacketData = lp->TxmBuffer[BufferIndex][PacketIndex]; - pbPacketData += 4; - - /* copy the bits */ - memcpy(pbPacketData, pbData, len); - - /* Set transmit status for hardware */ - *(unsigned int *)lp->TxmBuffer[BufferIndex][PacketIndex] = - cpu_to_le32((TXSF_READY << TX_STAT_CTL_OWNER_SHF) | - (TX_STAT_CTL_INT_AFTER_TX << - TX_STAT_CTL_FRAME_SHF) | - (len << TX_STAT_CTL_LENGTH_SHF)); - - /* Make sure A or B is available to hardware as appropriate. */ - writel(BufferIndex ? OK2USE_TX_B : OK2USE_TX_A, - &lp->lan_saa9730_regs->Ok2Use); - - return 0; -} - -static void lan_saa9730_tx_timeout(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - - /* Transmitter timeout, serious problems */ - dev->stats.tx_errors++; - printk("%s: transmit timed out, reset\n", dev->name); - /*show_saa9730_regs(dev); */ - lan_saa9730_restart(lp); - - dev->trans_start = jiffies; - netif_wake_queue(dev); -} - -static int lan_saa9730_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - unsigned long flags; - int skblen; - int len; - - if (lan_saa9730_debug > 4) - printk("Send packet: skb=%p\n", skb); - - skblen = skb->len; - - spin_lock_irqsave(&lp->lock, flags); - - len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; - - if (lan_saa9730_write(lp, skb, skblen)) { - spin_unlock_irqrestore(&lp->lock, flags); - printk("Error when writing packet to controller: skb=%p\n", skb); - netif_stop_queue(dev); - return -1; - } - - dev->stats.tx_bytes += len; - dev->stats.tx_packets++; - - dev->trans_start = jiffies; - netif_wake_queue(dev); - dev_kfree_skb(skb); - - spin_unlock_irqrestore(&lp->lock, flags); - - return 0; -} - -static int lan_saa9730_close(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - - if (lan_saa9730_debug > 1) - printk("lan_saa9730_close:\n"); - - netif_stop_queue(dev); - - /* Disable the Lan interrupt in the event manager. 
*/ - evm_saa9730_disable_lan_int(lp); - - /* Stop the controller */ - if (lan_saa9730_stop(lp)) - return -1; - - free_irq(dev->irq, (void *) dev); - - return 0; -} - -static void lan_saa9730_set_multicast(struct net_device *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - - /* Stop the controller */ - lan_saa9730_stop(lp); - - if (dev->flags & IFF_PROMISC) { - /* accept all packets */ - writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_STATION_ACC | - CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC, - &lp->lan_saa9730_regs->CamCtl); - } else { - if (dev->flags & IFF_ALLMULTI || dev->mc_count) { - /* accept all multicast packets */ - /* - * Will handle the multicast stuff later. -carstenl - */ - writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC | - CAM_CONTROL_BROAD_ACC, - &lp->lan_saa9730_regs->CamCtl); - } - } - - lan_saa9730_restart(lp); -} - - -static void __devexit saa9730_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct lan_saa9730_private *lp = netdev_priv(dev); - - if (dev) { - unregister_netdev(dev); - lan_saa9730_free_buffers(pdev, lp); - iounmap(lp->lan_saa9730_regs); - iounmap(lp->evm_saa9730_regs); - free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } -} - - -static int lan_saa9730_init(struct net_device *dev, struct pci_dev *pdev, - unsigned long ioaddr, int irq) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - unsigned char ethernet_addr[6]; - int ret; - - if (get_ethernet_addr(ethernet_addr)) { - ret = -ENODEV; - goto out; - } - - memcpy(dev->dev_addr, ethernet_addr, 6); - dev->base_addr = ioaddr; - dev->irq = irq; - - lp->pci_dev = pdev; - - /* Set SAA9730 LAN base address. */ - lp->lan_saa9730_regs = ioremap(ioaddr + SAA9730_LAN_REGS_ADDR, - SAA9730_LAN_REGS_SIZE); - if (!lp->lan_saa9730_regs) { - ret = -ENOMEM; - goto out; - } - - /* Set SAA9730 EVM base address. */ - lp->evm_saa9730_regs = ioremap(ioaddr + SAA9730_EVM_REGS_ADDR, - SAA9730_EVM_REGS_SIZE); - if (!lp->evm_saa9730_regs) { - ret = -ENOMEM; - goto out_iounmap_lan; - } - - /* Allocate LAN RX/TX frame buffer space. */ - if ((ret = lan_saa9730_allocate_buffers(pdev, lp))) - goto out_iounmap; - - /* Stop LAN controller. */ - if ((ret = lan_saa9730_stop(lp))) - goto out_free_consistent; - - /* Initialize CAM registers. */ - if ((ret = lan_saa9730_cam_init(dev))) - goto out_free_consistent; - - /* Initialize MII registers. */ - if ((ret = lan_saa9730_mii_init(lp))) - goto out_free_consistent; - - /* Initialize control registers. */ - if ((ret = lan_saa9730_control_init(lp))) - goto out_free_consistent; - - /* Load CAM registers. */ - if ((ret = lan_saa9730_cam_load(lp))) - goto out_free_consistent; - - /* Initialize DMA context registers. 
*/ - if ((ret = lan_saa9730_dma_init(lp))) - goto out_free_consistent; - - spin_lock_init(&lp->lock); - - dev->open = lan_saa9730_open; - dev->hard_start_xmit = lan_saa9730_start_xmit; - dev->stop = lan_saa9730_close; - dev->set_multicast_list = lan_saa9730_set_multicast; - dev->tx_timeout = lan_saa9730_tx_timeout; - dev->watchdog_timeo = (HZ >> 1); - dev->dma = 0; - - ret = register_netdev (dev); - if (ret) - goto out_free_consistent; - - return 0; - -out_free_consistent: - lan_saa9730_free_buffers(pdev, lp); -out_iounmap: - iounmap(lp->evm_saa9730_regs); -out_iounmap_lan: - iounmap(lp->lan_saa9730_regs); -out: - return ret; -} - - -static int __devinit saa9730_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *dev = NULL; - unsigned long pci_ioaddr; - int err; - - if (lan_saa9730_debug > 1) - printk("saa9730.c: PCI bios is present, checking for devices...\n"); - - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "Cannot enable PCI device, aborting.\n"); - goto out; - } - - err = pci_request_regions(pdev, DRV_MODULE_NAME); - if (err) { - printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n"); - goto out_disable_pdev; - } - - pci_irq_line = pdev->irq; - /* LAN base address in located at BAR 1. */ - - pci_ioaddr = pci_resource_start(pdev, 1); - pci_set_master(pdev); - - printk("Found SAA9730 (PCI) at %lx, irq %d.\n", - pci_ioaddr, pci_irq_line); - - dev = alloc_etherdev(sizeof(struct lan_saa9730_private)); - if (!dev) - goto out_disable_pdev; - - err = lan_saa9730_init(dev, pdev, pci_ioaddr, pci_irq_line); - if (err) { - printk("LAN init failed"); - goto out_free_netdev; - } - - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - return 0; - -out_free_netdev: - free_netdev(dev); -out_disable_pdev: - pci_disable_device(pdev); -out: - pci_set_drvdata(pdev, NULL); - return err; -} - - -static struct pci_driver saa9730_driver = { - .name = DRV_MODULE_NAME, - .id_table = saa9730_pci_tbl, - .probe = saa9730_init_one, - .remove = __devexit_p(saa9730_remove_one), -}; - - -static int __init saa9730_init(void) -{ - return pci_register_driver(&saa9730_driver); -} - -static void __exit saa9730_cleanup(void) -{ - pci_unregister_driver(&saa9730_driver); -} - -module_init(saa9730_init); -module_exit(saa9730_cleanup); - -MODULE_AUTHOR("Ralf Baechle "); -MODULE_DESCRIPTION("Philips SAA9730 ethernet driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/saa9730.h b/drivers/net/saa9730.h deleted file mode 100644 index 010a120ea938..000000000000 --- a/drivers/net/saa9730.h +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Copyright (C) 2000, 2005 MIPS Technologies, Inc. All rights reserved. - * Authors: Carsten Langgaard - * Maciej W. Rozycki - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
- * - * ######################################################################## - * - * SAA9730 ethernet driver description. - * - */ -#ifndef _SAA9730_H -#define _SAA9730_H - - -/* Number of 6-byte entries in the CAM. */ -#define LAN_SAA9730_CAM_ENTRIES 10 -#define LAN_SAA9730_CAM_DWORDS ((LAN_SAA9730_CAM_ENTRIES*6)/4) - -/* TX and RX packet size: fixed to 2048 bytes, according to HW requirements. */ -#define LAN_SAA9730_PACKET_SIZE 2048 - -/* - * Number of TX buffers = number of RX buffers = 2, which is fixed according - * to HW requirements. - */ -#define LAN_SAA9730_BUFFERS 2 - -/* Number of RX packets per RX buffer. */ -#define LAN_SAA9730_RCV_Q_SIZE 15 - -/* Number of TX packets per TX buffer. */ -#define LAN_SAA9730_TXM_Q_SIZE 15 - -/* - * We get an interrupt for each LAN_SAA9730_DEFAULT_RCV_Q_INT_THRESHOLD - * packets received. - * If however we receive less than LAN_SAA9730_DEFAULT_RCV_Q_INT_THRESHOLD - * packets, the hardware can timeout after a certain time and still tell - * us packets have arrived. - * The timeout value in unit of 32 PCI clocks (33Mhz). - * The value 200 approximates 0.0002 seconds. - */ -#define LAN_SAA9730_RCV_Q_INT_THRESHOLD 1 -#define LAN_SAA9730_DEFAULT_TIME_OUT_CNT 10 - -#define RXSF_NDIS 0 -#define RXSF_READY 2 -#define RXSF_HWDONE 3 - -#define TXSF_EMPTY 0 -#define TXSF_READY 2 -#define TXSF_HWDONE 3 - -#define LANEND_LITTLE 0 -#define LANEND_BIG_2143 1 -#define LANEND_BIG_4321 2 - -#define LANMB_ANY 0 -#define LANMB_8 1 -#define LANMB_32 2 -#define LANMB_64 3 - -#define MACCM_AUTOMATIC 0 -#define MACCM_10MB 1 -#define MACCM_MII 2 - -/* - * PHY definitions for Basic registers of QS6612 (used on MIPS ATLAS board) - */ -#define PHY_CONTROL 0x0 -#define PHY_STATUS 0x1 -#define PHY_STATUS_LINK_UP 0x4 -#define PHY_CONTROL_RESET 0x8000 -#define PHY_CONTROL_AUTO_NEG 0x1000 -#define PHY_CONTROL_RESTART_AUTO_NEG 0x0200 -#define PHY_ADDRESS 0x0 - -/* PK_COUNT register. */ -#define PK_COUNT_TX_A_SHF 24 -#define PK_COUNT_TX_A_MSK (0xff << PK_COUNT_TX_A_SHF) -#define PK_COUNT_TX_B_SHF 16 -#define PK_COUNT_TX_B_MSK (0xff << PK_COUNT_TX_B_SHF) -#define PK_COUNT_RX_A_SHF 8 -#define PK_COUNT_RX_A_MSK (0xff << PK_COUNT_RX_A_SHF) -#define PK_COUNT_RX_B_SHF 0 -#define PK_COUNT_RX_B_MSK (0xff << PK_COUNT_RX_B_SHF) - -/* OK2USE register. */ -#define OK2USE_TX_A 0x8 -#define OK2USE_TX_B 0x4 -#define OK2USE_RX_A 0x2 -#define OK2USE_RX_B 0x1 - -/* LAN DMA CONTROL register. */ -#define DMA_CTL_BLK_INT 0x80000000 -#define DMA_CTL_MAX_XFER_SHF 18 -#define DMA_CTL_MAX_XFER_MSK (0x3 << LAN_DMA_CTL_MAX_XFER_SHF) -#define DMA_CTL_ENDIAN_SHF 16 -#define DMA_CTL_ENDIAN_MSK (0x3 << LAN_DMA_CTL_ENDIAN_SHF) -#define DMA_CTL_RX_INT_COUNT_SHF 8 -#define DMA_CTL_RX_INT_COUNT_MSK (0xff << LAN_DMA_CTL_RX_INT_COUNT_SHF) -#define DMA_CTL_EN_TX_DMA 0x00000080 -#define DMA_CTL_EN_RX_DMA 0x00000040 -#define DMA_CTL_RX_INT_BUFFUL_EN 0x00000020 -#define DMA_CTL_RX_INT_TO_EN 0x00000010 -#define DMA_CTL_RX_INT_EN 0x00000008 -#define DMA_CTL_TX_INT_EN 0x00000004 -#define DMA_CTL_MAC_TX_INT_EN 0x00000002 -#define DMA_CTL_MAC_RX_INT_EN 0x00000001 - -/* DMA STATUS register. 
*/ -#define DMA_STATUS_BAD_ADDR_SHF 16 -#define DMA_STATUS_BAD_ADDR_MSK (0xf << DMA_STATUS_BAD_ADDR_SHF) -#define DMA_STATUS_RX_PKTS_RECEIVED_SHF 8 -#define DMA_STATUS_RX_PKTS_RECEIVED_MSK (0xff << DMA_STATUS_RX_PKTS_RECEIVED_SHF) -#define DMA_STATUS_TX_EN_SYNC 0x00000080 -#define DMA_STATUS_RX_BUF_A_FUL 0x00000040 -#define DMA_STATUS_RX_BUF_B_FUL 0x00000020 -#define DMA_STATUS_RX_TO_INT 0x00000010 -#define DMA_STATUS_RX_INT 0x00000008 -#define DMA_STATUS_TX_INT 0x00000004 -#define DMA_STATUS_MAC_TX_INT 0x00000002 -#define DMA_STATUS_MAC_RX_INT 0x00000001 - -/* DMA TEST/PANIC SWITHES register. */ -#define DMA_TEST_LOOPBACK 0x01000000 -#define DMA_TEST_SW_RESET 0x00000001 - -/* MAC CONTROL register. */ -#define MAC_CONTROL_EN_MISS_ROLL 0x00002000 -#define MAC_CONTROL_MISS_ROLL 0x00000400 -#define MAC_CONTROL_LOOP10 0x00000080 -#define MAC_CONTROL_CONN_SHF 5 -#define MAC_CONTROL_CONN_MSK (0x3 << MAC_CONTROL_CONN_SHF) -#define MAC_CONTROL_MAC_LOOP 0x00000010 -#define MAC_CONTROL_FULL_DUP 0x00000008 -#define MAC_CONTROL_RESET 0x00000004 -#define MAC_CONTROL_HALT_IMM 0x00000002 -#define MAC_CONTROL_HALT_REQ 0x00000001 - -/* CAM CONTROL register. */ -#define CAM_CONTROL_COMP_EN 0x00000010 -#define CAM_CONTROL_NEG_CAM 0x00000008 -#define CAM_CONTROL_BROAD_ACC 0x00000004 -#define CAM_CONTROL_GROUP_ACC 0x00000002 -#define CAM_CONTROL_STATION_ACC 0x00000001 - -/* TRANSMIT CONTROL register. */ -#define TX_CTL_EN_COMP 0x00004000 -#define TX_CTL_EN_TX_PAR 0x00002000 -#define TX_CTL_EN_LATE_COLL 0x00001000 -#define TX_CTL_EN_EX_COLL 0x00000800 -#define TX_CTL_EN_L_CARR 0x00000400 -#define TX_CTL_EN_EX_DEFER 0x00000200 -#define TX_CTL_EN_UNDER 0x00000100 -#define TX_CTL_MII10 0x00000080 -#define TX_CTL_SD_PAUSE 0x00000040 -#define TX_CTL_NO_EX_DEF0 0x00000020 -#define TX_CTL_F_BACK 0x00000010 -#define TX_CTL_NO_CRC 0x00000008 -#define TX_CTL_NO_PAD 0x00000004 -#define TX_CTL_TX_HALT 0x00000002 -#define TX_CTL_TX_EN 0x00000001 - -/* TRANSMIT STATUS register. */ -#define TX_STATUS_SQ_ERR 0x00010000 -#define TX_STATUS_TX_HALTED 0x00008000 -#define TX_STATUS_COMP 0x00004000 -#define TX_STATUS_TX_PAR 0x00002000 -#define TX_STATUS_LATE_COLL 0x00001000 -#define TX_STATUS_TX10_STAT 0x00000800 -#define TX_STATUS_L_CARR 0x00000400 -#define TX_STATUS_EX_DEFER 0x00000200 -#define TX_STATUS_UNDER 0x00000100 -#define TX_STATUS_IN_TX 0x00000080 -#define TX_STATUS_PAUSED 0x00000040 -#define TX_STATUS_TX_DEFERRED 0x00000020 -#define TX_STATUS_EX_COLL 0x00000010 -#define TX_STATUS_TX_COLL_SHF 0 -#define TX_STATUS_TX_COLL_MSK (0xf << TX_STATUS_TX_COLL_SHF) - -/* RECEIVE CONTROL register. */ -#define RX_CTL_EN_GOOD 0x00004000 -#define RX_CTL_EN_RX_PAR 0x00002000 -#define RX_CTL_EN_LONG_ERR 0x00000800 -#define RX_CTL_EN_OVER 0x00000400 -#define RX_CTL_EN_CRC_ERR 0x00000200 -#define RX_CTL_EN_ALIGN 0x00000100 -#define RX_CTL_IGNORE_CRC 0x00000040 -#define RX_CTL_PASS_CTL 0x00000020 -#define RX_CTL_STRIP_CRC 0x00000010 -#define RX_CTL_SHORT_EN 0x00000008 -#define RX_CTL_LONG_EN 0x00000004 -#define RX_CTL_RX_HALT 0x00000002 -#define RX_CTL_RX_EN 0x00000001 - -/* RECEIVE STATUS register. */ -#define RX_STATUS_RX_HALTED 0x00008000 -#define RX_STATUS_GOOD 0x00004000 -#define RX_STATUS_RX_PAR 0x00002000 -#define RX_STATUS_LONG_ERR 0x00000800 -#define RX_STATUS_OVERFLOW 0x00000400 -#define RX_STATUS_CRC_ERR 0x00000200 -#define RX_STATUS_ALIGN_ERR 0x00000100 -#define RX_STATUS_RX10_STAT 0x00000080 -#define RX_STATUS_INT_RX 0x00000040 -#define RX_STATUS_CTL_RECD 0x00000020 - -/* MD_CA register. 
*/ -#define MD_CA_PRE_SUP 0x00001000 -#define MD_CA_BUSY 0x00000800 -#define MD_CA_WR 0x00000400 -#define MD_CA_PHY_SHF 5 -#define MD_CA_PHY_MSK (0x1f << MD_CA_PHY_SHF) -#define MD_CA_ADDR_SHF 0 -#define MD_CA_ADDR_MSK (0x1f << MD_CA_ADDR_SHF) - -/* Tx Status/Control. */ -#define TX_STAT_CTL_OWNER_SHF 30 -#define TX_STAT_CTL_OWNER_MSK (0x3 << TX_STAT_CTL_OWNER_SHF) -#define TX_STAT_CTL_FRAME_SHF 27 -#define TX_STAT_CTL_FRAME_MSK (0x7 << TX_STAT_CTL_FRAME_SHF) -#define TX_STAT_CTL_STATUS_SHF 11 -#define TX_STAT_CTL_STATUS_MSK (0x1ffff << TX_STAT_CTL_STATUS_SHF) -#define TX_STAT_CTL_LENGTH_SHF 0 -#define TX_STAT_CTL_LENGTH_MSK (0x7ff << TX_STAT_CTL_LENGTH_SHF) - -#define TX_STAT_CTL_ERROR_MSK ((TX_STATUS_SQ_ERR | \ - TX_STATUS_TX_HALTED | \ - TX_STATUS_TX_PAR | \ - TX_STATUS_LATE_COLL | \ - TX_STATUS_L_CARR | \ - TX_STATUS_EX_DEFER | \ - TX_STATUS_UNDER | \ - TX_STATUS_PAUSED | \ - TX_STATUS_TX_DEFERRED | \ - TX_STATUS_EX_COLL | \ - TX_STATUS_TX_COLL_MSK) \ - << TX_STAT_CTL_STATUS_SHF) -#define TX_STAT_CTL_INT_AFTER_TX 0x4 - -/* Rx Status/Control. */ -#define RX_STAT_CTL_OWNER_SHF 30 -#define RX_STAT_CTL_OWNER_MSK (0x3 << RX_STAT_CTL_OWNER_SHF) -#define RX_STAT_CTL_STATUS_SHF 11 -#define RX_STAT_CTL_STATUS_MSK (0xffff << RX_STAT_CTL_STATUS_SHF) -#define RX_STAT_CTL_LENGTH_SHF 0 -#define RX_STAT_CTL_LENGTH_MSK (0x7ff << RX_STAT_CTL_LENGTH_SHF) - - - -/* The SAA9730 (LAN) controller register map, as seen via the PCI-bus. */ -#define SAA9730_LAN_REGS_ADDR 0x20400 -#define SAA9730_LAN_REGS_SIZE 0x00400 - -struct lan_saa9730_regmap { - volatile unsigned int TxBuffA; /* 0x20400 */ - volatile unsigned int TxBuffB; /* 0x20404 */ - volatile unsigned int RxBuffA; /* 0x20408 */ - volatile unsigned int RxBuffB; /* 0x2040c */ - volatile unsigned int PacketCount; /* 0x20410 */ - volatile unsigned int Ok2Use; /* 0x20414 */ - volatile unsigned int LanDmaCtl; /* 0x20418 */ - volatile unsigned int Timeout; /* 0x2041c */ - volatile unsigned int DmaStatus; /* 0x20420 */ - volatile unsigned int DmaTest; /* 0x20424 */ - volatile unsigned char filler20428[0x20430 - 0x20428]; - volatile unsigned int PauseCount; /* 0x20430 */ - volatile unsigned int RemotePauseCount; /* 0x20434 */ - volatile unsigned char filler20438[0x20440 - 0x20438]; - volatile unsigned int MacCtl; /* 0x20440 */ - volatile unsigned int CamCtl; /* 0x20444 */ - volatile unsigned int TxCtl; /* 0x20448 */ - volatile unsigned int TxStatus; /* 0x2044c */ - volatile unsigned int RxCtl; /* 0x20450 */ - volatile unsigned int RxStatus; /* 0x20454 */ - volatile unsigned int StationMgmtData; /* 0x20458 */ - volatile unsigned int StationMgmtCtl; /* 0x2045c */ - volatile unsigned int CamAddress; /* 0x20460 */ - volatile unsigned int CamData; /* 0x20464 */ - volatile unsigned int CamEnable; /* 0x20468 */ - volatile unsigned char filler2046c[0x20500 - 0x2046c]; - volatile unsigned int DebugPCIMasterAddr; /* 0x20500 */ - volatile unsigned int DebugLanTxStateMachine; /* 0x20504 */ - volatile unsigned int DebugLanRxStateMachine; /* 0x20508 */ - volatile unsigned int DebugLanTxFifoPointers; /* 0x2050c */ - volatile unsigned int DebugLanRxFifoPointers; /* 0x20510 */ - volatile unsigned int DebugLanCtlStateMachine; /* 0x20514 */ -}; -typedef volatile struct lan_saa9730_regmap t_lan_saa9730_regmap; - - -/* EVM interrupt control registers. */ -#define EVM_LAN_INT 0x00010000 -#define EVM_MASTER_EN 0x00000001 - -/* The SAA9730 (EVM) controller register map, as seen via the PCI-bus. 
*/ -#define SAA9730_EVM_REGS_ADDR 0x02000 -#define SAA9730_EVM_REGS_SIZE 0x00400 - -struct evm_saa9730_regmap { - volatile unsigned int InterruptStatus1; /* 0x2000 */ - volatile unsigned int InterruptEnable1; /* 0x2004 */ - volatile unsigned int InterruptMonitor1; /* 0x2008 */ - volatile unsigned int Counter; /* 0x200c */ - volatile unsigned int CounterThreshold; /* 0x2010 */ - volatile unsigned int CounterControl; /* 0x2014 */ - volatile unsigned int GpioControl1; /* 0x2018 */ - volatile unsigned int InterruptStatus2; /* 0x201c */ - volatile unsigned int InterruptEnable2; /* 0x2020 */ - volatile unsigned int InterruptMonitor2; /* 0x2024 */ - volatile unsigned int GpioControl2; /* 0x2028 */ - volatile unsigned int InterruptBlock1; /* 0x202c */ - volatile unsigned int InterruptBlock2; /* 0x2030 */ -}; -typedef volatile struct evm_saa9730_regmap t_evm_saa9730_regmap; - - -struct lan_saa9730_private { - /* - * Rx/Tx packet buffers. - * The Rx and Tx packets must be PACKET_SIZE aligned. - */ - void *buffer_start; - unsigned int buffer_size; - - /* - * DMA address of beginning of this object, returned - * by pci_alloc_consistent(). - */ - dma_addr_t dma_addr; - - /* Pointer to the associated pci device structure */ - struct pci_dev *pci_dev; - - /* Pointer for the SAA9730 LAN controller register set. */ - t_lan_saa9730_regmap *lan_saa9730_regs; - - /* Pointer to the SAA9730 EVM register. */ - t_evm_saa9730_regmap *evm_saa9730_regs; - - /* Rcv buffer Index. */ - unsigned char NextRcvPacketIndex; - /* Next buffer index. */ - unsigned char NextRcvBufferIndex; - - /* Index of next packet to use in that buffer. */ - unsigned char NextTxmPacketIndex; - /* Next buffer index. */ - unsigned char NextTxmBufferIndex; - - /* Index of first pending packet ready to send. */ - unsigned char PendingTxmPacketIndex; - /* Pending buffer index. */ - unsigned char PendingTxmBufferIndex; - - unsigned char DmaRcvPackets; - unsigned char DmaTxmPackets; - - void *TxmBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_TXM_Q_SIZE]; - void *RcvBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_RCV_Q_SIZE]; - unsigned int TxBufferFree[LAN_SAA9730_BUFFERS]; - - unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6]; - - spinlock_t lock; -}; - -#endif /* _SAA9730_H */