From d0a8ba6cba0a05ce79df100a3d99502b94117d88 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 24 Jun 2014 16:19:06 -0500 Subject: amd-xgbe: Make defines in xgbe.h unique In order to avoid conflicts with other include files, add a prefix to the defines in xgbe.h. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 23 +++---- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 100 +++++++++++++++--------------- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 28 +++++---- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 8 +-- drivers/net/ethernet/amd/xgbe/xgbe.h | 43 +++++++------ 5 files changed, 102 insertions(+), 100 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 6f1c85956d50..a9ce56d5e988 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -131,7 +131,7 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata, if (ring->rdata) { for (i = 0; i < ring->rdesc_count; i++) { - rdata = GET_DESC_DATA(ring, i); + rdata = XGBE_GET_DESC_DATA(ring, i); xgbe_unmap_skb(pdata, rdata); } @@ -256,7 +256,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) rdesc_dma = ring->rdesc_dma; for (j = 0; j < ring->rdesc_count; j++) { - rdata = GET_DESC_DATA(ring, j); + rdata = XGBE_GET_DESC_DATA(ring, j); rdata->rdesc = rdesc; rdata->rdesc_dma = rdesc_dma; @@ -298,7 +298,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) rdesc_dma = ring->rdesc_dma; for (j = 0; j < ring->rdesc_count; j++) { - rdata = GET_DESC_DATA(ring, j); + rdata = XGBE_GET_DESC_DATA(ring, j); rdata->rdesc = rdesc; rdata->rdesc_dma = rdesc_dma; @@ -392,7 +392,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) if ((tso && (packet->mss != ring->tx.cur_mss)) || (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))) cur_index++; - rdata = GET_DESC_DATA(ring, cur_index); + rdata = XGBE_GET_DESC_DATA(ring, cur_index); if (tso) { DBGPR(" TSO packet\n"); @@ -413,12 +413,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) packet->length += packet->header_len; cur_index++; - rdata = GET_DESC_DATA(ring, cur_index); + rdata = XGBE_GET_DESC_DATA(ring, cur_index); } /* Map the (remainder of the) packet */ for (datalen = skb_headlen(skb) - offset; datalen; ) { - len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE); + len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE); skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, DMA_TO_DEVICE); @@ -437,7 +437,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) packet->length += len; cur_index++; - rdata = GET_DESC_DATA(ring, cur_index); + rdata = XGBE_GET_DESC_DATA(ring, cur_index); } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { @@ -447,7 +447,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) offset = 0; for (datalen = skb_frag_size(frag); datalen; ) { - len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE); + len = min_t(unsigned int, datalen, + XGBE_TX_MAX_BUF_SIZE); skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, len, DMA_TO_DEVICE); @@ -468,7 +469,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) packet->length += len; cur_index++; - rdata = GET_DESC_DATA(ring, cur_index); + rdata = XGBE_GET_DESC_DATA(ring, cur_index); } } @@ -484,7 +485,7 @@ static int 
xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) err_out: while (start_index < cur_index) { - rdata = GET_DESC_DATA(ring, start_index++); + rdata = XGBE_GET_DESC_DATA(ring, start_index++); xgbe_unmap_skb(pdata, rdata); } @@ -507,7 +508,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel) ring->rx.realloc_index); for (i = 0; i < ring->dirty; i++) { - rdata = GET_DESC_DATA(ring, ring->rx.realloc_index); + rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index); /* Reset rdata values */ xgbe_unmap_skb(pdata, rdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 002293b0819d..864af2da54cd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -766,7 +766,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel) /* Initialze all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { - rdata = GET_DESC_DATA(ring, i); + rdata = XGBE_GET_DESC_DATA(ring, i); rdesc = rdata->rdesc; /* Initialize Tx descriptor @@ -791,7 +791,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel) XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); /* Update the starting address of descriptor ring */ - rdata = GET_DESC_DATA(ring, start_index); + rdata = XGBE_GET_DESC_DATA(ring, start_index); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, upper_32_bits(rdata->rdesc_dma)); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, @@ -848,7 +848,7 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) /* Initialize all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { - rdata = GET_DESC_DATA(ring, i); + rdata = XGBE_GET_DESC_DATA(ring, i); rdesc = rdata->rdesc; /* Initialize Rx descriptor @@ -882,14 +882,14 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); /* Update the starting address of descriptor ring */ - rdata = GET_DESC_DATA(ring, start_index); + rdata = XGBE_GET_DESC_DATA(ring, start_index); XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, upper_32_bits(rdata->rdesc_dma)); XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, lower_32_bits(rdata->rdesc_dma)); /* Update the Rx Descriptor Tail Pointer */ - rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); + rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, lower_32_bits(rdata->rdesc_dma)); @@ -933,7 +933,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) if (tx_coalesce && !channel->tx_timer_active) ring->coalesce_count = 0; - rdata = GET_DESC_DATA(ring, ring->cur); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; /* Create a context descriptor if this is a TSO packet */ @@ -977,7 +977,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) } ring->cur++; - rdata = GET_DESC_DATA(ring, ring->cur); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; } @@ -1034,7 +1034,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) { ring->cur++; - rdata = GET_DESC_DATA(ring, ring->cur); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; /* Update buffer address */ @@ -1074,7 +1074,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) wmb(); /* Set OWN bit for the first descriptor */ - rdata = GET_DESC_DATA(ring, start_index); + rdata = XGBE_GET_DESC_DATA(ring, start_index); rdesc = rdata->rdesc; XGMAC_SET_BITS_LE(rdesc->desc3, 
TX_NORMAL_DESC3, OWN, 1); @@ -1088,7 +1088,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) /* Issue a poll command to Tx DMA by writing address * of next immediate free descriptor */ ring->cur++; - rdata = GET_DESC_DATA(ring, ring->cur); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, lower_32_bits(rdata->rdesc_dma)); @@ -1117,7 +1117,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); - rdata = GET_DESC_DATA(ring, ring->cur); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; /* Check for data availability */ @@ -1195,7 +1195,7 @@ static void xgbe_save_interrupt_status(struct xgbe_channel *channel, if (int_state == XGMAC_INT_STATE_SAVE) { channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - channel->saved_ier &= DMA_INTERRUPT_MASK; + channel->saved_ier &= XGBE_DMA_INTERRUPT_MASK; } else { dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); dma_ch_ier |= channel->saved_ier; @@ -1275,7 +1275,7 @@ static int xgbe_disable_int(struct xgbe_channel *channel, xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE); dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - dma_ch_ier &= ~DMA_INTERRUPT_MASK; + dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); break; default: @@ -1342,23 +1342,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) unsigned int arcache, awcache; arcache = 0; - XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, XGBE_DMA_ARCACHE); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, XGBE_DMA_ARDOMAIN); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, XGBE_DMA_ARCACHE); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, XGBE_DMA_ARDOMAIN); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, XGBE_DMA_ARCACHE); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, XGBE_DMA_ARDOMAIN); XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); awcache = 0; - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, XGBE_DMA_AWCACHE); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, XGBE_DMA_AWDOMAIN); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, XGBE_DMA_AWCACHE); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, XGBE_DMA_AWDOMAIN); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, XGBE_DMA_AWCACHE); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, XGBE_DMA_AWDOMAIN); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, XGBE_DMA_AWCACHE); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, XGBE_DMA_AWDOMAIN); XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); } @@ -1388,66 +1388,66 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, /* Calculate Tx/Rx fifo share per 
queue */ switch (fifo_size) { case 0: - q_fifo_size = FIFO_SIZE_B(128); + q_fifo_size = XGBE_FIFO_SIZE_B(128); break; case 1: - q_fifo_size = FIFO_SIZE_B(256); + q_fifo_size = XGBE_FIFO_SIZE_B(256); break; case 2: - q_fifo_size = FIFO_SIZE_B(512); + q_fifo_size = XGBE_FIFO_SIZE_B(512); break; case 3: - q_fifo_size = FIFO_SIZE_KB(1); + q_fifo_size = XGBE_FIFO_SIZE_KB(1); break; case 4: - q_fifo_size = FIFO_SIZE_KB(2); + q_fifo_size = XGBE_FIFO_SIZE_KB(2); break; case 5: - q_fifo_size = FIFO_SIZE_KB(4); + q_fifo_size = XGBE_FIFO_SIZE_KB(4); break; case 6: - q_fifo_size = FIFO_SIZE_KB(8); + q_fifo_size = XGBE_FIFO_SIZE_KB(8); break; case 7: - q_fifo_size = FIFO_SIZE_KB(16); + q_fifo_size = XGBE_FIFO_SIZE_KB(16); break; case 8: - q_fifo_size = FIFO_SIZE_KB(32); + q_fifo_size = XGBE_FIFO_SIZE_KB(32); break; case 9: - q_fifo_size = FIFO_SIZE_KB(64); + q_fifo_size = XGBE_FIFO_SIZE_KB(64); break; case 10: - q_fifo_size = FIFO_SIZE_KB(128); + q_fifo_size = XGBE_FIFO_SIZE_KB(128); break; case 11: - q_fifo_size = FIFO_SIZE_KB(256); + q_fifo_size = XGBE_FIFO_SIZE_KB(256); break; } q_fifo_size = q_fifo_size / queue_count; /* Set the queue fifo size programmable value */ - if (q_fifo_size >= FIFO_SIZE_KB(256)) + if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256)) p_fifo = XGMAC_MTL_FIFO_SIZE_256K; - else if (q_fifo_size >= FIFO_SIZE_KB(128)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128)) p_fifo = XGMAC_MTL_FIFO_SIZE_128K; - else if (q_fifo_size >= FIFO_SIZE_KB(64)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64)) p_fifo = XGMAC_MTL_FIFO_SIZE_64K; - else if (q_fifo_size >= FIFO_SIZE_KB(32)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32)) p_fifo = XGMAC_MTL_FIFO_SIZE_32K; - else if (q_fifo_size >= FIFO_SIZE_KB(16)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16)) p_fifo = XGMAC_MTL_FIFO_SIZE_16K; - else if (q_fifo_size >= FIFO_SIZE_KB(8)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8)) p_fifo = XGMAC_MTL_FIFO_SIZE_8K; - else if (q_fifo_size >= FIFO_SIZE_KB(4)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4)) p_fifo = XGMAC_MTL_FIFO_SIZE_4K; - else if (q_fifo_size >= FIFO_SIZE_KB(2)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2)) p_fifo = XGMAC_MTL_FIFO_SIZE_2K; - else if (q_fifo_size >= FIFO_SIZE_KB(1)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1)) p_fifo = XGMAC_MTL_FIFO_SIZE_1K; - else if (q_fifo_size >= FIFO_SIZE_B(512)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512)) p_fifo = XGMAC_MTL_FIFO_SIZE_512; - else if (q_fifo_size >= FIFO_SIZE_B(256)) + else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256)) p_fifo = XGMAC_MTL_FIFO_SIZE_256; return p_fifo; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index cfe3d93b5f52..0efb5062e271 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -144,9 +144,10 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) } rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - if (rx_buf_size < RX_MIN_BUF_SIZE) - rx_buf_size = RX_MIN_BUF_SIZE; - rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1); + if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE) + rx_buf_size = XGBE_RX_MIN_BUF_SIZE; + rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & + ~(XGBE_RX_BUF_ALIGN - 1); return rx_buf_size; } @@ -446,7 +447,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) break; for (j = 0; j < ring->rdesc_count; j++) { - rdata = GET_DESC_DATA(ring, j); + rdata = XGBE_GET_DESC_DATA(ring, j); desc_if->unmap_skb(pdata, rdata); } } @@ -471,7 +472,7 @@ 
static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) break; for (j = 0; j < ring->rdesc_count; j++) { - rdata = GET_DESC_DATA(ring, j); + rdata = XGBE_GET_DESC_DATA(ring, j); desc_if->unmap_skb(pdata, rdata); } } @@ -726,14 +727,14 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb, for (len = skb_headlen(skb); len;) { packet->rdesc_count++; - len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE); + len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; for (len = skb_frag_size(frag); len; ) { packet->rdesc_count++; - len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE); + len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); } } } @@ -1089,8 +1090,9 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) spin_lock_irqsave(&ring->lock, flags); - while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) { - rdata = GET_DESC_DATA(ring, ring->dirty); + while ((processed < XGBE_TX_DESC_MAX_PROC) && + (ring->dirty < ring->cur)) { + rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); rdesc = rdata->rdesc; if (!hw_if->tx_complete(rdesc)) @@ -1109,7 +1111,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) } if ((ring->tx.queue_stopped == 1) && - (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) { + (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) { ring->tx.queue_stopped = 0; netif_wake_subqueue(netdev, channel->queue_index); } @@ -1152,7 +1154,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) cur_len = 0; read_again: - rdata = GET_DESC_DATA(ring, ring->cur); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); if (hw_if->dev_read(channel)) break; @@ -1244,7 +1246,7 @@ read_again: /* Update the Rx Tail Pointer Register with address of * the last cleaned entry */ - rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1); + rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1); XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, lower_32_bits(rdata->rdesc_dma)); } @@ -1296,7 +1298,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, struct xgbe_ring_desc *rdesc; while (count--) { - rdata = GET_DESC_DATA(ring, idx); + rdata = XGBE_GET_DESC_DATA(ring, idx); rdesc = rdata->rdesc; DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, (flag == 1) ? 
"QUEUED FOR TX" : "TX BY DEVICE", diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index c83584a26713..fefcbb6c69a8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -247,16 +247,16 @@ static int xgbe_probe(struct platform_device *pdev) mutex_init(&pdata->xpcs_mutex); /* Set and validate the number of descriptors for a ring */ - BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT); - pdata->tx_desc_count = TX_DESC_CNT; + BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); + pdata->tx_desc_count = XGBE_TX_DESC_CNT; if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { dev_err(dev, "tx descriptor count (%d) is not valid\n", pdata->tx_desc_count); ret = -EINVAL; goto err_io; } - BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT); - pdata->rx_desc_count = RX_DESC_CNT; + BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT); + pdata->rx_desc_count = XGBE_RX_DESC_CNT; if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) { dev_err(dev, "rx descriptor count (%d) is not valid\n", pdata->rx_desc_count); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ab0627162c01..f5da54cddd8d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -128,22 +128,25 @@ #define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver" /* Descriptor related defines */ -#define TX_DESC_CNT 512 -#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3) -#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1) -#define RX_DESC_CNT 512 +#define XGBE_TX_DESC_CNT 512 +#define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3) +#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) +#define XGBE_RX_DESC_CNT 512 -#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) -#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) -#define RX_BUF_ALIGN 64 +#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) +#define XGBE_RX_BUF_ALIGN 64 #define XGBE_MAX_DMA_CHANNELS 16 -#define DMA_ARDOMAIN_SETTING 0x2 -#define DMA_ARCACHE_SETTING 0xb -#define DMA_AWDOMAIN_SETTING 0x2 -#define DMA_AWCACHE_SETTING 0x7 -#define DMA_INTERRUPT_MASK 0x31c7 + +/* DMA cache settings - Outer sharable, write-back, write-allocate */ +#define XGBE_DMA_ARDOMAIN 0x2 +#define XGBE_DMA_ARCACHE 0xb +#define XGBE_DMA_AWDOMAIN 0x2 +#define XGBE_DMA_AWCACHE 0x7 + +#define XGBE_DMA_INTERRUPT_MASK 0x31c7 #define XGMAC_MIN_PACKET 60 #define XGMAC_STD_PACKET_MTU 1500 @@ -151,10 +154,6 @@ #define XGMAC_JUMBO_PACKET_MTU 9000 #define XGMAC_MAX_JUMBO_PACKET 9018 -#define MAX_MULTICAST_LIST 14 -#define TX_FLAGS_IP_PKT 0x00000001 -#define TX_FLAGS_TCP_PKT 0x00000002 - /* MDIO bus phy name */ #define XGBE_PHY_NAME "amd_xgbe_phy" #define XGBE_PRTAD 0 @@ -163,18 +162,18 @@ #define XGMAC_DRIVER_CONTEXT 1 #define XGMAC_IOCTL_CONTEXT 2 -#define FIFO_SIZE_B(x) (x) -#define FIFO_SIZE_KB(x) (x * 1024) +#define XGBE_FIFO_SIZE_B(x) (x) +#define XGBE_FIFO_SIZE_KB(x) (x * 1024) #define XGBE_TC_CNT 2 /* Helper macro for descriptor handling - * Always use GET_DESC_DATA to access the descriptor data + * Always use XGBE_GET_DESC_DATA to access the descriptor data * since the index is free-running and needs to be and-ed * with the descriptor count value of the ring to index to * the proper descriptor data. 
*/ -#define GET_DESC_DATA(_ring, _idx) \ +#define XGBE_GET_DESC_DATA(_ring, _idx) \ ((_ring)->rdata + \ ((_idx) & ((_ring)->rdesc_count - 1))) @@ -219,7 +218,7 @@ struct xgbe_ring_desc { /* Structure used to hold information related to the descriptor * and the packet associated with the descriptor (always use - * use the GET_DESC_DATA macro to access this data from the ring) + * use the XGBE_GET_DESC_DATA macro to access this data from the ring) */ struct xgbe_ring_data { struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */ @@ -250,7 +249,7 @@ struct xgbe_ring { unsigned int rdesc_count; /* Array of descriptor data corresponding the descriptor memory - * (always use the GET_DESC_DATA macro to access this data) + * (always use the XGBE_GET_DESC_DATA macro to access this data) */ struct xgbe_ring_data *rdata; -- cgit v1.2.1 From 6e5eed042fcac4166874d25cf9fb89bee841eb26 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 24 Jun 2014 16:19:12 -0500 Subject: amd-xgbe: VLAN Tx tag insertion fix The MAC_VLAN_Incl register (0x0060) must be set to indicate that the VLAN tag to be inserted comes from a Tx context descriptor and not the MAC_VLAN_Incl register. Also, even though it is the default, explicitly set the type of tag to be inserted as a CTAG. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 4 ++++ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index bf462ee86f5c..129d322977f1 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -427,6 +427,10 @@ #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 +#define MAC_VLANIR_VLTI_INDEX 20 +#define MAC_VLANIR_VLTI_WIDTH 1 +#define MAC_VLANIR_CSVL_INDEX 19 +#define MAC_VLANIR_CSVL_WIDTH 1 #define MAC_VLANTR_DOVLTC_INDEX 20 #define MAC_VLANTR_DOVLTC_WIDTH 1 #define MAC_VLANTR_ERSVLM_INDEX 19 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 864af2da54cd..b239b216e191 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1541,6 +1541,10 @@ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) { + /* Indicate that VLAN Tx CTAGs come from context descriptors */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) xgbe_enable_rx_vlan_stripping(pdata); else -- cgit v1.2.1 From c52e9c6385ec5578eaac2989b855742e52777711 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 24 Jun 2014 16:19:18 -0500 Subject: amd-xgbe: VLAN Rx tag stripping fix When receiving a VLAN packet check to be sure that VLAN RX CTAG stripping is enabled before indicating that the tag has been stripped in the packet information data structure. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index b239b216e191..d0f437945dd4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1113,6 +1113,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; + struct net_device *netdev = channel->pdata->netdev; unsigned int err, etlt; DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); @@ -1153,7 +1154,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel) DBGPR(" err=%u, etlt=%#x\n", err, etlt); if (!err || (err && !etlt)) { - if (etlt == 0x09) { + if ((etlt == 0x09) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG, 1); packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, -- cgit v1.2.1 From 801c62d945c6121c0262924732e430f0553bfb37 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 24 Jun 2014 16:19:24 -0500 Subject: amd-xgbe: Add support for VLAN filtering This patch adds support for (imperfect) filtering of VLAN tag ids using a 16-bit filter hash table. When VLANs are added, a 4-bit hash is calculated with the result indicating the bit in the hash table to set. This table is used by the hardware to drop packets with a VLAN id that does not hash to a set bit in the table. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/Kconfig | 1 + drivers/net/ethernet/amd/xgbe/xgbe-common.h | 12 ++++ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 95 +++++++++++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 60 ++++++++++++++---- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 3 +- drivers/net/ethernet/amd/xgbe/xgbe.h | 8 +++ 6 files changed, 165 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index bbaf36d9f5e1..8e83e4c8fff0 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -182,6 +182,7 @@ config AMD_XGBE depends on OF_NET select PHYLIB select AMD_XGBE_PHY + select BITREVERSE ---help--- This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. 
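The 4-bit hash described in the commit message above is computed by xgbe_vid_crc32_le() and xgbe_update_vlan_hash_table() in the xgbe-dev.c hunk further below: a bit-serial CRC-32 (polynomial 0xedb88320) over the 12 VLAN ID bits is bit-reversed and only its top four bits are kept, selecting one of the 16 bits in MAC_VLANHTR. The standalone sketch that follows (not taken from any patch in this series) mirrors that mapping in plain user-space C; the hand-rolled bitrev32() and CRC helpers stand in for the kernel's bitrev32(), VLAN_VID_MASK and get_bitmask_order(), so treat it as an approximation of the driver logic rather than driver code.

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled stand-in for the kernel's bitrev32() */
static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

/* Bit-serial CRC-32, polynomial 0xedb88320, over the 12 VID bits, LSB first */
static uint32_t vid_crc32_le(unsigned int vid)
{
	uint32_t crc = ~0u;
	int i;

	for (i = 0; i < 12; i++) {
		if ((crc ^ (vid >> i)) & 1u)
			crc = (crc >> 1) ^ 0xedb88320u;
		else
			crc >>= 1;
	}
	return crc;
}

int main(void)
{
	unsigned int vid = 100;	/* example VLAN ID */
	unsigned int bit = bitrev32(~vid_crc32_le(vid)) >> 28;	/* keep top 4 bits */

	printf("VID %u sets MAC_VLANHTR bit %u (table value 0x%04x)\n",
	       vid, bit, 1u << bit);
	return 0;
}

Running it for two different VLAN IDs is a quick way to see when they collide in the same MAC_VLANHTR bit, which is the "imperfect" part of the filtering that the commit message refers to.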
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 129d322977f1..1de9d0fc594a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -393,6 +393,8 @@ #define MAC_PFR_PM_WIDTH 1 #define MAC_PFR_PR_INDEX 0 #define MAC_PFR_PR_WIDTH 1 +#define MAC_PFR_VTFE_INDEX 16 +#define MAC_PFR_VTFE_WIDTH 1 #define MAC_PMTCSR_MGKPKTEN_INDEX 1 #define MAC_PMTCSR_MGKPKTEN_WIDTH 1 #define MAC_PMTCSR_PWRDWN_INDEX 0 @@ -427,6 +429,8 @@ #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 +#define MAC_VLANHTR_VLHT_INDEX 0 +#define MAC_VLANHTR_VLHT_WIDTH 16 #define MAC_VLANIR_VLTI_INDEX 20 #define MAC_VLANIR_VLTI_WIDTH 1 #define MAC_VLANIR_CSVL_INDEX 19 @@ -437,10 +441,18 @@ #define MAC_VLANTR_ERSVLM_WIDTH 1 #define MAC_VLANTR_ESVL_INDEX 18 #define MAC_VLANTR_ESVL_WIDTH 1 +#define MAC_VLANTR_ETV_INDEX 16 +#define MAC_VLANTR_ETV_WIDTH 1 #define MAC_VLANTR_EVLS_INDEX 21 #define MAC_VLANTR_EVLS_WIDTH 2 #define MAC_VLANTR_EVLRXS_INDEX 24 #define MAC_VLANTR_EVLRXS_WIDTH 1 +#define MAC_VLANTR_VL_INDEX 0 +#define MAC_VLANTR_VL_WIDTH 16 +#define MAC_VLANTR_VTHM_INDEX 25 +#define MAC_VLANTR_VTHM_WIDTH 1 +#define MAC_VLANTR_VTIM_INDEX 17 +#define MAC_VLANTR_VTIM_WIDTH 1 #define MAC_VR_DEVID_INDEX 8 #define MAC_VR_DEVID_WIDTH 8 #define MAC_VR_SNPSVER_INDEX 0 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index d0f437945dd4..2c7d582e0859 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -116,6 +116,7 @@ #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -738,6 +739,89 @@ static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) return 0; } +static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Enable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); + + /* Enable VLAN Hash Table filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); + + /* Disable VLAN tag inverse matching */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); + + /* Only filter on the lower 12-bits of the VLAN tag */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); + + /* In order for the VLAN Hash Table filtering to be effective, + * the VLAN tag identifier in the VLAN Tag Register must not + * be zero. Set the VLAN tag identifier to "1" to enable the + * VLAN Hash Table filtering. This implies that a VLAN tag of + * 1 will always pass filtering. 
+ */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); + + return 0; +} + +static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Disable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); + + return 0; +} + +#ifndef CRCPOLY_LE +#define CRCPOLY_LE 0xedb88320 +#endif +static u32 xgbe_vid_crc32_le(__le16 vid_le) +{ + u32 poly = CRCPOLY_LE; + u32 crc = ~0; + u32 temp = 0; + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= poly; + } + + return crc; +} + +static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) +{ + u32 crc; + u16 vid; + __le16 vid_le; + u16 vlan_hash_table = 0; + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } + + /* Set the VLAN Hash Table filtering register */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); + + return 0; +} + static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) { struct xgbe_ring_desc *rdesc = rdata->rdesc; @@ -1547,6 +1631,14 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); + /* Set the current VLAN Hash Table register value */ + xgbe_update_vlan_hash_table(pdata); + + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xgbe_enable_rx_vlan_filtering(pdata); + else + xgbe_disable_rx_vlan_filtering(pdata); + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) xgbe_enable_rx_vlan_stripping(pdata); else @@ -2118,6 +2210,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; + hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; + hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; + hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; hw_if->read_mmd_regs = xgbe_read_mmd_regs; hw_if->write_mmd_regs = xgbe_write_mmd_regs; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 0efb5062e271..2acc37c07d9b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1000,6 +1000,38 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, return s; } +static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->%s\n", __func__); + + set_bit(vid, pdata->active_vlans); + hw_if->update_vlan_hash_table(pdata); + + DBGPR("<--%s\n", __func__); + + return 0; +} + +static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->%s\n", __func__); + + clear_bit(vid, pdata->active_vlans); + hw_if->update_vlan_hash_table(pdata); + + DBGPR("<--%s\n", __func__); + + return 0; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void xgbe_poll_controller(struct 
net_device *netdev) { @@ -1022,26 +1054,26 @@ static int xgbe_set_features(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; - unsigned int rxcsum_enabled, rxvlan_enabled; + unsigned int rxcsum, rxvlan, rxvlan_filter; - rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM); - rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX); + rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; + rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; + rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; - if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) { + if ((features & NETIF_F_RXCSUM) && !rxcsum) hw_if->enable_rx_csum(pdata); - netdev_alert(netdev, "state change - rxcsum enabled\n"); - } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) { + else if (!(features & NETIF_F_RXCSUM) && rxcsum) hw_if->disable_rx_csum(pdata); - netdev_alert(netdev, "state change - rxcsum disabled\n"); - } - if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) { + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) hw_if->enable_rx_vlan_stripping(pdata); - netdev_alert(netdev, "state change - rxvlan enabled\n"); - } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) { + else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) hw_if->disable_rx_vlan_stripping(pdata); - netdev_alert(netdev, "state change - rxvlan disabled\n"); - } + + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) + hw_if->enable_rx_vlan_filtering(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) + hw_if->disable_rx_vlan_filtering(pdata); pdata->netdev_features = features; @@ -1059,6 +1091,8 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = xgbe_change_mtu, .ndo_get_stats64 = xgbe_get_stats64, + .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xgbe_poll_controller, #endif diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index fefcbb6c69a8..c3a48b7c2aed 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -385,7 +385,8 @@ static int xgbe_probe(struct platform_device *pdev) NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index f5da54cddd8d..3cb911f39b60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -121,6 +121,8 @@ #include #include #include +#include +#include #define XGBE_DRV_NAME "amd-xgbe" @@ -393,6 +395,9 @@ struct xgbe_hw_if { int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *); int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *); + int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *); + int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *); + int (*update_vlan_hash_table)(struct xgbe_prv_data *); int (*read_mmd_regs)(struct xgbe_prv_data *, int, int); void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int); @@ -588,6 +593,9 @@ struct xgbe_prv_data { struct napi_struct napi; struct xgbe_mmc_stats mmc_stats; + /* Filtering support */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + 
/* System clock value used for Rx watchdog */ struct clk *sysclock; -- cgit v1.2.1 From b85e4d8960f10e4b28613a3e7b76f8889a2089e3 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 24 Jun 2014 16:19:29 -0500 Subject: amd-xgbe: Change destination address filtering support Currently the driver makes use of the additional mac address registers in the hardware to provide perfect filtering. The hardware can also have a set of hash table registers that can be used for imperfect filtering. By using imperfect filtering the additional mac address registers can be used for layer 2 filtering support. Use the hash table registers if the device has them. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/Kconfig | 1 + drivers/net/ethernet/amd/xgbe/xgbe-common.h | 10 +- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 141 ++++++++++++++++++---------- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 27 ++++-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2 + drivers/net/ethernet/amd/xgbe/xgbe.h | 4 +- 6 files changed, 116 insertions(+), 69 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 8e83e4c8fff0..6e314dbba805 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -183,6 +183,7 @@ config AMD_XGBE select PHYLIB select AMD_XGBE_PHY select BITREVERSE + select CRC32 ---help--- This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 1de9d0fc594a..ccbceba553e5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -276,13 +276,6 @@ #define MAC_PFR 0x0008 #define MAC_WTR 0x000c #define MAC_HTR0 0x0010 -#define MAC_HTR1 0x0014 -#define MAC_HTR2 0x0018 -#define MAC_HTR3 0x001c -#define MAC_HTR4 0x0020 -#define MAC_HTR5 0x0024 -#define MAC_HTR6 0x0028 -#define MAC_HTR7 0x002c #define MAC_VLANTR 0x0050 #define MAC_VLANHTR 0x0058 #define MAC_VLANIR 0x0060 @@ -315,6 +308,7 @@ #define MAC_QTFCR_INC 4 #define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 /* MAC register entry bit positions and sizes */ #define MAC_HWF0R_ADDMACADRSEL_INDEX 18 @@ -387,6 +381,8 @@ #define MAC_MACA1HR_AE_WIDTH 1 #define MAC_PFR_HMC_INDEX 2 #define MAC_PFR_HMC_WIDTH 1 +#define MAC_PFR_HPF_INDEX 10 +#define MAC_PFR_HPF_WIDTH 1 #define MAC_PFR_HUC_INDEX 1 #define MAC_PFR_HUC_WIDTH 1 #define MAC_PFR_PM_INDEX 4 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 2c7d582e0859..a56069c91fc4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -117,6 +117,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -548,24 +549,16 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, return 0; } -static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata, - unsigned int am_mode) +static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, + struct netdev_hw_addr *ha, unsigned int *mac_reg) { - struct netdev_hw_addr *ha; - unsigned int mac_reg; unsigned int mac_addr_hi, mac_addr_lo; u8 *mac_addr; - unsigned int i; - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0); - - i = 0; - mac_reg = MAC_MACA1HR; + mac_addr_lo = 0; + mac_addr_hi = 0; - netdev_for_each_uc_addr(ha, pdata->netdev) { - mac_addr_lo = 0; - mac_addr_hi = 0; + if (ha) { 
mac_addr = (u8 *)&mac_addr_lo; mac_addr[0] = ha->addr[0]; mac_addr[1] = ha->addr[1]; @@ -575,54 +568,93 @@ static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata, mac_addr[0] = ha->addr[4]; mac_addr[1] = ha->addr[5]; - DBGPR(" adding unicast address %pM at 0x%04x\n", - ha->addr, mac_reg); + DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr, + *mac_reg); XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); + } - XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi); - mac_reg += MAC_MACA_INC; - XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo); - mac_reg += MAC_MACA_INC; + XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); + *mac_reg += MAC_MACA_INC; + XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); + *mac_reg += MAC_MACA_INC; +} - i++; - } +static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + unsigned int mac_reg; + unsigned int addn_macs; + + mac_reg = MAC_MACA1HR; + addn_macs = pdata->hw_feat.addn_mac; - if (!am_mode) { - netdev_for_each_mc_addr(ha, pdata->netdev) { - mac_addr_lo = 0; - mac_addr_hi = 0; - mac_addr = (u8 *)&mac_addr_lo; - mac_addr[0] = ha->addr[0]; - mac_addr[1] = ha->addr[1]; - mac_addr[2] = ha->addr[2]; - mac_addr[3] = ha->addr[3]; - mac_addr = (u8 *)&mac_addr_hi; - mac_addr[0] = ha->addr[4]; - mac_addr[1] = ha->addr[5]; - - DBGPR(" adding multicast address %pM at 0x%04x\n", - ha->addr, mac_reg); - - XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); - - XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi); - mac_reg += MAC_MACA_INC; - XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo); - mac_reg += MAC_MACA_INC; - - i++; + if (netdev_uc_count(netdev) > addn_macs) { + xgbe_set_promiscuous_mode(pdata, 1); + } else { + netdev_for_each_uc_addr(ha, netdev) { + xgbe_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + + if (netdev_mc_count(netdev) > addn_macs) { + xgbe_set_all_multicast_mode(pdata, 1); + } else { + netdev_for_each_mc_addr(ha, netdev) { + xgbe_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } } } /* Clear remaining additional MAC address entries */ - for (; i < pdata->hw_feat.addn_mac; i++) { - XGMAC_IOWRITE(pdata, mac_reg, 0); - mac_reg += MAC_MACA_INC; - XGMAC_IOWRITE(pdata, mac_reg, 0); - mac_reg += MAC_MACA_INC; + while (addn_macs--) + xgbe_set_mac_reg(pdata, NULL, &mac_reg); +} + +static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + unsigned int hash_reg; + unsigned int hash_table_shift, hash_table_count; + u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE]; + u32 crc; + unsigned int i; + + hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); + hash_table_count = pdata->hw_feat.hash_table_size / 32; + memset(hash_table, 0, sizeof(hash_table)); + + /* Build the MAC Hash Table register values */ + netdev_for_each_uc_addr(ha, netdev) { + crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); + crc >>= hash_table_shift; + hash_table[crc >> 5] |= (1 << (crc & 0x1f)); + } + + netdev_for_each_mc_addr(ha, netdev) { + crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); + crc >>= hash_table_shift; + hash_table[crc >> 5] |= (1 << (crc & 0x1f)); } + /* Set the MAC Hash Table registers */ + hash_reg = MAC_HTR0; + for (i = 0; i < hash_table_count; i++) { + XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]); + hash_reg += MAC_HTR_INC; + } +} + +static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) +{ + if (pdata->hw_feat.hash_table_size) + xgbe_set_mac_hash_table(pdata); + else + xgbe_set_mac_addn_addrs(pdata); + return 0; } 
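One detail of xgbe_set_mac_hash_table() above is worth spelling out: the shift "26 - (pdata->hw_feat.hash_table_size >> 7)" equals 32 minus log2 of the table size for the three translated sizes the driver handles (64, 128 and 256 bits, see the xgbe-drv.c hunk later in this commit), so only the top 6, 7 or 8 bits of the bit-reversed CRC-32 of the address survive, and that bucket is then split into a MAC_HTRn register index and a bit position. The sketch below (not taken from any patch in this series) restates that arithmetic as standalone C; the function name, the example CRC value and the user-space form are assumptions made purely for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * rev_crc stands for bitrev32(~crc32_le(~0, addr, ETH_ALEN)), the bit-reversed
 * CRC-32 of the 6-byte MAC address that the driver computes.  table_size is
 * the translated hash table size in bits: 64, 128 or 256.
 */
static void mac_hash_slot(uint32_t rev_crc, unsigned int table_size,
			  unsigned int *reg, unsigned int *bit)
{
	/* 26 - (table_size >> 7) == 32 - log2(table_size) for 64/128/256 */
	unsigned int shift = 26 - (table_size >> 7);
	uint32_t bucket = rev_crc >> shift;	/* keep the top 6, 7 or 8 bits */

	*reg = bucket >> 5;	/* which 32-bit MAC_HTRn register */
	*bit = bucket & 0x1f;	/* which bit within that register */
}

int main(void)
{
	unsigned int reg, bit;

	/* hypothetical CRC value, purely for demonstration */
	mac_hash_slot(0xdeadbeef, 256, &reg, &bit);
	printf("register MAC_HTR%u, bit %u\n", reg, bit);
	return 0;
}

With a 256-bit table the example keeps the top byte of 0xdeadbeef (0xde, bucket 222) and reports MAC_HTR6 bit 30, which is the same bit that the loop writing MAC_HTR0 + i * MAC_HTR_INC in the patch would set for that CRC.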
@@ -1606,6 +1638,13 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) { xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); + + /* Filtering is done using perfect filtering and hash filtering */ + if (pdata->hw_feat.hash_table_size) { + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); + } } static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) @@ -2202,7 +2241,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode; hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode; - hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs; + hw_if->add_mac_addresses = xgbe_add_mac_addresses; hw_if->set_mac_address = xgbe_set_mac_address; hw_if->enable_rx_csum = xgbe_enable_rx_csum; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 2acc37c07d9b..72dd61166a1c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -378,6 +378,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + /* The Queue and Channel counts are zero based so increment them * to get the actual number */ @@ -912,18 +927,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev) pr_mode = ((netdev->flags & IFF_PROMISC) != 0); am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); - if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac) - pr_mode = 1; - if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac) - am_mode = 1; - if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) > - pdata->hw_feat.addn_mac) - pr_mode = 1; - hw_if->set_promiscuous_mode(pdata, pr_mode); hw_if->set_all_multicast_mode(pdata, am_mode); - if (!pr_mode) - hw_if->set_addn_mac_addrs(pdata, am_mode); + + hw_if->add_mac_addresses(pdata); DBGPR("<--xgbe_set_rx_mode\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index c3a48b7c2aed..b411ac557c47 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -397,6 +397,8 @@ static int xgbe_probe(struct platform_device *pdev) netdev->features |= netdev->hw_features; pdata->netdev_features = netdev->features; + netdev->priv_flags |= IFF_UNICAST_FLT; + xgbe_init_rx_coalesce(pdata); xgbe_init_tx_coalesce(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 3cb911f39b60..a2d5f5f5d8b7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -191,6 +191,8 @@ /* Flow control queue count */ #define XGMAC_MAX_FLOW_CONTROL_QUEUES 8 +/* Maximum MAC address hash table size (256 bits = 8 bytes) */ +#define XGBE_MAC_HASH_TABLE_SIZE 8 struct xgbe_prv_data; @@ -387,7 +389,7 @@ struct xgbe_hw_if { int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int); int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int); - int (*set_addn_mac_addrs)(struct 
xgbe_prv_data *, unsigned int); + int (*add_mac_addresses)(struct xgbe_prv_data *); int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr); int (*enable_rx_csum)(struct xgbe_prv_data *); -- cgit v1.2.1 From 66f95c35c413f674a835034dc667099b44225df2 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 24 Jun 2014 16:19:35 -0500 Subject: amd-xgbe: Resolve checkpatch warning about sscanf usage Checkpatch issued a warning preferring to use kstrto when using a single variable sscanf. Change the sscanf invocation to a kstrtouint call. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 6bb76d5c817b..81198587a6c6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -151,7 +151,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count, { char workarea[32]; ssize_t len; - unsigned int scan_value; + int ret; if (*ppos != 0) return 0; @@ -165,10 +165,9 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count, return len; workarea[len] = '\0'; - if (sscanf(workarea, "%x", &scan_value) == 1) - *value = scan_value; - else - return -EIO; + ret = kstrtouint(workarea, 0, value); + if (ret) + return ret; return len; } -- cgit v1.2.1 From f3f128d40c4cc263af8e30b009a3eb17655e912b Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 2 Jul 2014 13:04:28 -0500 Subject: amd-xgbe: Fix debugfs compatibility change with kstrtouint The initial change from sscanf to kstrtouint broke backward compatibility by using a base of "0" in the kstrtouint call. This allowed for entering decimal, hexadecimal or octal as input where previously the sscanf always interpreted the input as hexadecimal. Additionally, -EIO was returned on error prior to this change and now it is whatever error value is returned by kstrtouint. Change the base value of the kstrtouint from 0 to 16 and return -EIO on error. Signed-off-by: Tom Lendacky Reported-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 81198587a6c6..346592dca33c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -165,9 +165,9 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count, return len; workarea[len] = '\0'; - ret = kstrtouint(workarea, 0, value); + ret = kstrtouint(workarea, 16, value); if (ret) - return ret; + return -EIO; return len; } -- cgit v1.2.1 From 91f873453b70fe6cdb83409bee68e1023204c381 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 2 Jul 2014 13:04:34 -0500 Subject: amd-xgbe: Clear the proper MTL interrupt register When initializing the MTL interrupts the interrupt status register is written to instead of the interrupt enable register. Since no MTL interrupts are being enabled and the default state is for MTL interrupts to be disabled this did not cause a problem, but needs to be fixed to target the correct register. Signed-off-by: Tom Lendacky Signed-off-by: David S.
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index a56069c91fc4..e9fed23b2c33 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -486,7 +486,7 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); /* No MTL interrupts to be enabled */ - XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0); + XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); } } -- cgit v1.2.1 From ff42606eed00bc065365f55269d558c06b968594 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 2 Jul 2014 13:04:40 -0500 Subject: amd-xgbe: Call netif_napi_del on ndo_stop operation Currently the napi context is added using netif_napi_add each time the ndo_open operation is called. However, there is not a corresponding netif_napi_del call during the ndo_stop operation. If the device ndo_open operation was called more than once an infinite loop occurs during module unload. Add a call to netif_napi_del during the ndo_stop operation. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 72dd61166a1c..b5fdf66f9a56 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -412,9 +412,12 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) napi_enable(&pdata->napi); } -static void xgbe_napi_disable(struct xgbe_prv_data *pdata) +static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) { napi_disable(&pdata->napi); + + if (del) + netif_napi_del(&pdata->napi); } void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) @@ -518,7 +521,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller) netif_device_detach(netdev); netif_tx_stop_all_queues(netdev); - xgbe_napi_disable(pdata); + xgbe_napi_disable(pdata, 0); /* Powerdown Tx/Rx */ hw_if->powerdown_tx(pdata); @@ -607,7 +610,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) phy_stop(pdata->phydev); netif_tx_stop_all_queues(netdev); - xgbe_napi_disable(pdata); + xgbe_napi_disable(pdata, 1); xgbe_stop_tx_timers(pdata); -- cgit v1.2.1 From 9867e8fb2c45888cc594457914dcbba599f086c8 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 2 Jul 2014 13:04:46 -0500 Subject: amd-xgbe: Performance enhancements This patch provides some general performance enhancements for the driver: - Modify the default coalescing settings (reduce usec, increase frames) - Change the AXI burst length to 256 bytes (default was 16 bytes which was smaller than a cache line) - Change the AXI cache settings to write-back/write-allocate which allocate cache entries for received packets during the DMA since the packet will be processed soon afterwards - Combine ioread/iowrite when disabling both the Tx and Rx interrupts - Change to processing the Tx/Rx channels in pairs - Only recycle the Rx descriptors when a threshold of dirty descriptors is reached Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 + drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 73 +++++++++++------------ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 91 ++++++++++++++++++----------- drivers/net/ethernet/amd/xgbe/xgbe.h | 12 ++-- 4 files changed, 100 insertions(+), 78 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index ccbceba553e5..7ec80ac7043f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -170,6 +170,8 @@ #define DMA_MR_SWR_WIDTH 1 #define DMA_SBMR_EAME_INDEX 11 #define DMA_SBMR_EAME_WIDTH 1 +#define DMA_SBMR_BLEN_256_INDEX 7 +#define DMA_SBMR_BLEN_256_WIDTH 1 #define DMA_SBMR_UNDEF_INDEX 0 #define DMA_SBMR_UNDEF_WIDTH 1 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index e9fed23b2c33..d6a45ced8adb 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1306,56 +1306,48 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); } -static void xgbe_save_interrupt_status(struct xgbe_channel *channel, - enum xgbe_int_state int_state) +static int xgbe_enable_int(struct xgbe_channel *channel, + enum xgbe_int int_id) { unsigned int dma_ch_ier; - if (int_state == XGMAC_INT_STATE_SAVE) { - channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - channel->saved_ier &= XGBE_DMA_INTERRUPT_MASK; - } else { - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - dma_ch_ier |= channel->saved_ier; - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); - } -} + dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); -static int xgbe_enable_int(struct xgbe_channel *channel, - enum xgbe_int int_id) -{ switch (int_id) { - case XGMAC_INT_DMA_ISR_DC0IS: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1); - break; case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1); + break; + case XGMAC_INT_DMA_CH_SR_TI_RI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); break; case XGMAC_INT_DMA_ALL: - xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE); + dma_ch_ier |= channel->saved_ier; break; default: return -1; } + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + return 0; } @@ -1364,42 +1356,44 @@ static int xgbe_disable_int(struct xgbe_channel *channel, { unsigned int dma_ch_ier; + dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); + switch (int_id) { - case 
XGMAC_INT_DMA_ISR_DC0IS: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0); - break; case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0); + break; + case XGMAC_INT_DMA_CH_SR_TI_RI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0); break; case XGMAC_INT_DMA_ALL: - xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE); - - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); + channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK; dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); break; default: return -1; } + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + return 0; } @@ -1453,6 +1447,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) /* Set the System Bus mode */ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1); + XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1); } static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b5fdf66f9a56..344e6b19ec0e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -156,16 +156,21 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel; + enum xgbe_int int_id; unsigned int i; channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { - if (channel->tx_ring) - hw_if->enable_int(channel, - XGMAC_INT_DMA_CH_SR_TI); - if (channel->rx_ring) - hw_if->enable_int(channel, - XGMAC_INT_DMA_CH_SR_RI); + if (channel->tx_ring && channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_if->enable_int(channel, int_id); } } @@ -173,16 +178,21 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel; + enum xgbe_int int_id; unsigned int i; channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { - if (channel->tx_ring) - hw_if->disable_int(channel, - XGMAC_INT_DMA_CH_SR_TI); - if (channel->rx_ring) - hw_if->disable_int(channel, - XGMAC_INT_DMA_CH_SR_RI); + if (channel->tx_ring && channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_if->disable_int(channel, 
int_id); } } @@ -1114,6 +1124,22 @@ struct net_device_ops *xgbe_get_netdev_ops(void) return (struct net_device_ops *)&xgbe_netdev_ops; } +static void xgbe_rx_refresh(struct xgbe_channel *channel) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + + desc_if->realloc_skb(channel); + + /* Update the Rx Tail Pointer Register with address of + * the last cleaned entry */ + rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, + lower_32_bits(rdata->rdesc_dma)); +} + static int xgbe_tx_poll(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -1171,7 +1197,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) { struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; @@ -1198,6 +1223,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) cur_len = 0; read_again: + if (ring->dirty > (XGBE_RX_DESC_CNT >> 3)) + xgbe_rx_refresh(channel); + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); if (hw_if->dev_read(channel)) @@ -1285,16 +1313,6 @@ read_again: napi_gro_receive(&pdata->napi, skb); } - if (received) { - desc_if->realloc_skb(channel); - - /* Update the Rx Tail Pointer Register with address of - * the last cleaned entry */ - rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1); - XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, - lower_32_bits(rdata->rdesc_dma)); - } - DBGPR("<--xgbe_rx_poll: received = %d\n", received); return received; @@ -1305,21 +1323,28 @@ static int xgbe_poll(struct napi_struct *napi, int budget) struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, napi); struct xgbe_channel *channel; - int processed; + int ring_budget; + int processed, last_processed; unsigned int i; DBGPR("-->xgbe_poll: budget=%d\n", budget); - /* Cleanup Tx ring first */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - xgbe_tx_poll(channel); - - /* Process Rx ring next */ processed = 0; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - processed += xgbe_rx_poll(channel, budget - processed); + ring_budget = budget / pdata->rx_ring_count; + do { + last_processed = processed; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Cleanup Tx ring first */ + xgbe_tx_poll(channel); + + /* Process Rx ring next */ + if (ring_budget > (budget - processed)) + ring_budget = budget - processed; + processed += xgbe_rx_poll(channel, ring_budget); + } + } while ((processed < budget) && (processed != last_processed)); /* If we processed everything, we are done */ if (processed < budget) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index a2d5f5f5d8b7..eef8ea1dee47 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -146,7 +146,7 @@ #define XGBE_DMA_ARDOMAIN 0x2 #define XGBE_DMA_ARCACHE 0xb #define XGBE_DMA_AWDOMAIN 0x2 -#define XGBE_DMA_AWCACHE 0x7 +#define XGBE_DMA_AWCACHE 0xf #define XGBE_DMA_INTERRUPT_MASK 0x31c7 @@ -181,12 +181,12 @@ /* Default coalescing parameters */ -#define XGMAC_INIT_DMA_TX_USECS 100 -#define XGMAC_INIT_DMA_TX_FRAMES 16 +#define XGMAC_INIT_DMA_TX_USECS 50 
+#define XGMAC_INIT_DMA_TX_FRAMES 25 #define XGMAC_MAX_DMA_RIWT 0xff -#define XGMAC_INIT_DMA_RX_USECS 100 -#define XGMAC_INIT_DMA_RX_FRAMES 16 +#define XGMAC_INIT_DMA_RX_USECS 30 +#define XGMAC_INIT_DMA_RX_FRAMES 25 /* Flow control queue count */ #define XGMAC_MAX_FLOW_CONTROL_QUEUES 8 @@ -307,13 +307,13 @@ struct xgbe_channel { } ____cacheline_aligned; enum xgbe_int { - XGMAC_INT_DMA_ISR_DC0IS, XGMAC_INT_DMA_CH_SR_TI, XGMAC_INT_DMA_CH_SR_TPS, XGMAC_INT_DMA_CH_SR_TBU, XGMAC_INT_DMA_CH_SR_RI, XGMAC_INT_DMA_CH_SR_RBU, XGMAC_INT_DMA_CH_SR_RPS, + XGMAC_INT_DMA_CH_SR_TI_RI, XGMAC_INT_DMA_CH_SR_FBE, XGMAC_INT_DMA_ALL, }; -- cgit v1.2.1 From cfa50c7811fe3857fd882be96b2b2f130fa5da6d Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 2 Jul 2014 13:04:57 -0500 Subject: amd-xgbe: Base AXI DMA cache settings on device tree The default cache operations for ARM64 were changed during 3.15. To use coherent operations a "dma-coherent" device tree property is required. If that property is not present in the device tree node then the non-coherent operations are assigned for the device. Add support to the amd-xgbe driver to assign the AXI DMA cache settings based on whether the "dma-coherent" property is present in the device node. If present, use settings that work with the caches. If not present, use settings that do not look at the caches. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 28 ++++++++++++++-------------- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10 ++++++++++ drivers/net/ethernet/amd/xgbe/xgbe.h | 17 +++++++++++++---- 3 files changed, 37 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index d6a45ced8adb..699cff5d3184 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1455,23 +1455,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) unsigned int arcache, awcache; arcache = 0; - XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, XGBE_DMA_ARCACHE); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, XGBE_DMA_ARDOMAIN); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, XGBE_DMA_ARCACHE); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, XGBE_DMA_ARDOMAIN); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, XGBE_DMA_ARCACHE); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, XGBE_DMA_ARDOMAIN); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain); XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); awcache = 0; - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, XGBE_DMA_AWCACHE); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, XGBE_DMA_AWDOMAIN); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, XGBE_DMA_AWCACHE); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, XGBE_DMA_AWDOMAIN); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, XGBE_DMA_AWCACHE); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, XGBE_DMA_AWDOMAIN); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, XGBE_DMA_AWCACHE); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, XGBE_DMA_AWDOMAIN); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 
pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain); XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index b411ac557c47..d5a4f76e9474 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -297,6 +297,16 @@ static int xgbe_probe(struct platform_device *pdev) *(dev->dma_mask) = DMA_BIT_MASK(40); dev->coherent_dma_mask = DMA_BIT_MASK(40); + if (of_property_read_bool(dev->of_node, "dma-coherent")) { + pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; + pdata->arcache = XGBE_DMA_OS_ARCACHE; + pdata->awcache = XGBE_DMA_OS_AWCACHE; + } else { + pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN; + pdata->arcache = XGBE_DMA_SYS_ARCACHE; + pdata->awcache = XGBE_DMA_SYS_AWCACHE; + } + ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "platform_get_irq failed\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index eef8ea1dee47..9e24b296e272 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -143,10 +143,14 @@ #define XGBE_MAX_DMA_CHANNELS 16 /* DMA cache settings - Outer sharable, write-back, write-allocate */ -#define XGBE_DMA_ARDOMAIN 0x2 -#define XGBE_DMA_ARCACHE 0xb -#define XGBE_DMA_AWDOMAIN 0x2 -#define XGBE_DMA_AWCACHE 0xf +#define XGBE_DMA_OS_AXDOMAIN 0x2 +#define XGBE_DMA_OS_ARCACHE 0xb +#define XGBE_DMA_OS_AWCACHE 0xf + +/* DMA cache settings - System, no caches used */ +#define XGBE_DMA_SYS_AXDOMAIN 0x3 +#define XGBE_DMA_SYS_ARCACHE 0x0 +#define XGBE_DMA_SYS_AWCACHE 0x0 #define XGBE_DMA_INTERRUPT_MASK 0x31c7 @@ -536,6 +540,11 @@ struct xgbe_prv_data { struct xgbe_hw_if hw_if; struct xgbe_desc_if desc_if; + /* AXI DMA settings */ + unsigned int axdomain; + unsigned int arcache; + unsigned int awcache; + /* Rings for Tx/Rx on a DMA channel */ struct xgbe_channel *channel; unsigned int channel_count; -- cgit v1.2.1 From 3d5baba0ecfdd5de35bb7ce41ef9218f2b17b006 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Thu, 3 Jul 2014 05:56:51 +0100 Subject: declance: Fix 64-bit compilation warnings This fixes compiler warnings: drivers/net/ethernet/amd/declance.c: In function 'lance_init_ring': drivers/net/ethernet/amd/declance.c:478: warning: format '%8.8x' expects type 'unsigned int', but argument 3 has type 'long unsigned int' drivers/net/ethernet/amd/declance.c:487: warning: format '%8.8x' expects type 'unsigned int', but argument 3 has type 'long unsigned int' drivers/net/ethernet/amd/declance.c:503: warning: cast from pointer to integer of different size drivers/net/ethernet/amd/declance.c:520: warning: cast from pointer to integer of different size in 64-bit compilation. Where the value printed is an offset (whose range will always fit) the cast uses a 32-bit type, otherwise, where it is a host memory address, the pointer is output directly with %p. Also the remaining `0x' prefix is dropped for consistency across these messages. Tested with both 32-bit and 64-bit compilation, as well as at the run time (with the debug messages affected enabled). Signed-off-by: Maciej W. Rozycki Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/declance.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 57397295887c..b584b78237df 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -475,7 +475,7 @@ static void lance_init_ring(struct net_device *dev) *lib_ptr(ib, rx_ptr, lp->type) = leptr; if (ZERO) printk("RX ptr: %8.8x(%8.8x)\n", - leptr, lib_off(brx_ring, lp->type)); + leptr, (uint)lib_off(brx_ring, lp->type)); /* Setup tx descriptor pointer */ leptr = offsetof(struct lance_init_block, btx_ring); @@ -484,7 +484,7 @@ static void lance_init_ring(struct net_device *dev) *lib_ptr(ib, tx_ptr, lp->type) = leptr; if (ZERO) printk("TX ptr: %8.8x(%8.8x)\n", - leptr, lib_off(btx_ring, lp->type)); + leptr, (uint)lib_off(btx_ring, lp->type)); if (ZERO) printk("TX rings:\n"); @@ -499,8 +499,8 @@ static void lance_init_ring(struct net_device *dev) /* The ones required by tmd2 */ *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0; if (i < 3 && ZERO) - printk("%d: 0x%8.8x(0x%8.8x)\n", - i, leptr, (uint)lp->tx_buf_ptr_cpu[i]); + printk("%d: %8.8x(%p)\n", + i, leptr, lp->tx_buf_ptr_cpu[i]); } /* Setup the Rx ring entries */ @@ -516,8 +516,8 @@ static void lance_init_ring(struct net_device *dev) 0xf000; *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0; if (i < 3 && ZERO) - printk("%d: 0x%8.8x(0x%8.8x)\n", - i, leptr, (uint)lp->rx_buf_ptr_cpu[i]); + printk("%d: %8.8x(%p)\n", + i, leptr, lp->rx_buf_ptr_cpu[i]); } iob(); } -- cgit v1.2.1 From 43519e60ef8020e7b4f99d20b6663fab50af21dc Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Mon, 14 Jul 2014 14:09:04 +0530 Subject: ethernet: amd: move amd111e_remove_one after probe This patch moves the remove functionalities after the probe so that we can see the registered and released resources properly. Every driver follows the same concept. Signed-off-by: Varka Bhadram Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/amd8111e.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 068dc7cad5fa..ddd09e830527 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1701,18 +1701,6 @@ static int amd8111e_resume(struct pci_dev *pci_dev) return 0; } - -static void amd8111e_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - if (dev) { - unregister_netdev(dev); - iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio); - free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - } -} static void amd8111e_config_ipg(struct net_device* dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -1970,6 +1958,19 @@ err_disable_pdev: } +static void amd8111e_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + unregister_netdev(dev); + iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + static struct pci_driver amd8111e_driver = { .name = MODULE_NAME, .id_table = amd8111e_pci_tbl, -- cgit v1.2.1 From 711fec5d22e15f24b11b6c7f79cc39cb96f52b13 Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Mon, 14 Jul 2014 14:09:05 +0530 Subject: ethernet: amd: use devm_ioremap() This patch replace ioremap() with the devm_ioremap() so that the resource will be freed automatically with the probe failed. Signed-off-by: Varka Bhadram Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index ddd09e830527..433107c3cd2b 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1866,7 +1866,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, spin_lock_init(&lp->lock); - lp->mmio = ioremap(reg_addr, reg_len); + lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len); if (!lp->mmio) { printk(KERN_ERR "amd8111e: Cannot map device registers, " "exiting\n"); @@ -1913,7 +1913,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, if (err) { printk(KERN_ERR "amd8111e: Cannot register net device, " "exiting.\n"); - goto err_iounmap; + goto err_free_dev; } pci_set_drvdata(pdev, dev); @@ -1943,8 +1943,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev, printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n", dev->name); return 0; -err_iounmap: - iounmap(lp->mmio); err_free_dev: free_netdev(dev); @@ -1964,7 +1962,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev) if (dev) { unregister_netdev(dev); - iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); -- cgit v1.2.1 From f7afbaa557337d059603ef8c3519831c9e0d1591 Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Mon, 14 Jul 2014 14:09:06 +0530 Subject: ethernet: amd: dynamic debug fixes This patch convert printk() to netdev_dbg/info/err or dev_info/err/dbg Signed-off-by: Varka Bhadram Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/amd8111e.c | 58 +++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 433107c3cd2b..c322aa77ad1c 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -501,8 +501,7 @@ static int amd8111e_restart(struct net_device *dev) /* Enable interrupt coalesce */ if(lp->options & OPTION_INTR_COAL_ENABLE){ - printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n", - dev->name); + netdev_info(dev, "Interrupt Coalescing Enabled.\n"); amd8111e_set_coalesce(dev,ENABLE_COAL); } @@ -860,16 +859,19 @@ static int amd8111e_link_change(struct net_device* dev) else if(speed == PHY_SPEED_100) lp->link_config.speed = SPEED_100; - printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name, - (lp->link_config.speed == SPEED_100) ? "100": "10", - (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half"); + netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n", + (lp->link_config.speed == SPEED_100) ? + "100" : "10", + (lp->link_config.duplex == DUPLEX_FULL) ? + "Full" : "Half"); + netif_carrier_on(dev); } else{ lp->link_config.speed = SPEED_INVALID; lp->link_config.duplex = DUPLEX_INVALID; lp->link_config.autoneg = AUTONEG_INVALID; - printk(KERN_INFO "%s: Link is Down.\n",dev->name); + netdev_info(dev, "Link is Down.\n"); netif_carrier_off(dev); } @@ -1168,7 +1170,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) /* Schedule a polling routine */ __napi_schedule(&lp->napi); } else if (intren0 & RINTEN0) { - printk("************Driver bug! interrupt while in poll\n"); + netdev_dbg(dev, "************Driver bug! interrupt while in poll\n"); /* Fix by disable receive interrupts */ writel(RINTEN0, mmio + INTEN0); } @@ -1264,7 +1266,7 @@ static int amd8111e_open(struct net_device * dev ) /* Start ipg timer */ if(lp->options & OPTION_DYN_IPG_ENABLE){ add_timer(&lp->ipg_data.ipg_timer); - printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name); + netdev_info(dev, "Dynamic IPG Enabled\n"); } lp->opened = 1; @@ -1623,8 +1625,8 @@ static void amd8111e_tx_timeout(struct net_device *dev) struct amd8111e_priv* lp = netdev_priv(dev); int err; - printk(KERN_ERR "%s: transmit timed out, resetting\n", - dev->name); + netdev_err(dev, "transmit timed out, resetting\n"); + spin_lock_irq(&lp->lock); err = amd8111e_restart(dev); spin_unlock_irq(&lp->lock); @@ -1807,22 +1809,19 @@ static int amd8111e_probe_one(struct pci_dev *pdev, err = pci_enable_device(pdev); if(err){ - printk(KERN_ERR "amd8111e: Cannot enable new PCI device, " - "exiting.\n"); + dev_err(&pdev->dev, "Cannot enable new PCI device\n"); return err; } if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){ - printk(KERN_ERR "amd8111e: Cannot find PCI base address, " - "exiting.\n"); + dev_err(&pdev->dev, "Cannot find PCI base address\n"); err = -ENODEV; goto err_disable_pdev; } err = pci_request_regions(pdev, MODULE_NAME); if(err){ - printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, " - "exiting.\n"); + dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); goto err_disable_pdev; } @@ -1830,16 +1829,14 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* Find power-management capability. 
*/ if (!pdev->pm_cap) { - printk(KERN_ERR "amd8111e: No Power Management capability, " - "exiting.\n"); + dev_err(&pdev->dev, "No Power Management capability\n"); err = -ENODEV; goto err_free_reg; } /* Initialize DMA */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { - printk(KERN_ERR "amd8111e: DMA not supported," - "exiting.\n"); + dev_err(&pdev->dev, "DMA not supported\n"); err = -ENODEV; goto err_free_reg; } @@ -1868,8 +1865,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len); if (!lp->mmio) { - printk(KERN_ERR "amd8111e: Cannot map device registers, " - "exiting\n"); + dev_err(&pdev->dev, "Cannot map device registers\n"); err = -ENOMEM; goto err_free_dev; } @@ -1911,8 +1907,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, err = register_netdev(dev); if (err) { - printk(KERN_ERR "amd8111e: Cannot register net device, " - "exiting.\n"); + dev_err(&pdev->dev, "Cannot register net device\n"); goto err_free_dev; } @@ -1932,16 +1927,15 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* display driver and device information */ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; - printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", - dev->name,MODULE_VERS); - printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", - dev->name, chip_version, dev->dev_addr); + dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS); + dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", + chip_version, dev->dev_addr); if (lp->ext_phy_id) - printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n", - dev->name, lp->ext_phy_id, lp->ext_phy_addr); + dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n", + lp->ext_phy_id, lp->ext_phy_addr); else - printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n", - dev->name); + dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n"); + return 0; err_free_dev: -- cgit v1.2.1 From 13a4fa43bff03b745904fb1c6baaca42dd0fbf2c Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Mon, 14 Jul 2014 14:09:07 +0530 Subject: ethernet: amd: fix comment styles This patch fixes the comment style issues Signed-off-by: Varka Bhadram Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 181 +++++++++++++++++------------------- 1 file changed, 86 insertions(+), 95 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index c322aa77ad1c..a8f5c496c6cf 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -116,9 +116,8 @@ static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = { { 0, } }; -/* -This function will read the PHY registers. -*/ + +/* This function will read the PHY registers. */ static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val) { void __iomem *mmio = lp->mmio; @@ -146,9 +145,7 @@ err_phy_read: } -/* -This function will write into PHY registers. -*/ +/* This function will write into PHY registers. */ static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val) { unsigned int repeat = REPEAT_CNT; @@ -176,9 +173,8 @@ err_phy_write: return -EINVAL; } -/* -This is the mii register read function provided to the mii interface. -*/ + +/* This is the mii register read function provided to the mii interface. 
*/ static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num) { struct amd8111e_priv* lp = netdev_priv(dev); @@ -189,9 +185,7 @@ static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num) } -/* -This is the mii register write function provided to the mii interface. -*/ +/* This is the mii register write function provided to the mii interface. */ static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val) { struct amd8111e_priv* lp = netdev_priv(dev); @@ -199,9 +193,9 @@ static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num amd8111e_write_phy(lp, phy_id, reg_num, val); } -/* -This function will set PHY speed. During initialization sets the original speed to 100 full. -*/ +/* This function will set PHY speed. During initialization sets + * the original speed to 100 full + */ static void amd8111e_set_ext_phy(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -240,10 +234,9 @@ static void amd8111e_set_ext_phy(struct net_device *dev) } -/* -This function will unmap skb->data space and will free -all transmit and receive skbuffs. -*/ +/* This function will unmap skb->data space and will free + * all transmit and receive skbuffs. + */ static int amd8111e_free_skbs(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -274,9 +267,9 @@ static int amd8111e_free_skbs(struct net_device *dev) return 0; } -/* -This will set the receive buffer length corresponding to the mtu size of networkinterface. -*/ +/* This will set the receive buffer length corresponding + * to the mtu size of networkinterface. + */ static inline void amd8111e_set_rx_buff_len(struct net_device* dev) { struct amd8111e_priv* lp = netdev_priv(dev); @@ -284,8 +277,8 @@ static inline void amd8111e_set_rx_buff_len(struct net_device* dev) if (mtu > ETH_DATA_LEN){ /* MTU + ethernet header + FCS - + optional VLAN tag + skb reserve space 2 */ - + * + optional VLAN tag + skb reserve space 2 + */ lp->rx_buff_len = mtu + ETH_HLEN + 10; lp->options |= OPTION_JUMBO_ENABLE; } else{ @@ -294,8 +287,10 @@ static inline void amd8111e_set_rx_buff_len(struct net_device* dev) } } -/* -This function will free all the previously allocated buffers, determine new receive buffer length and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors. +/* This function will free all the previously allocated buffers, + * determine new receive buffer length and will allocate new receive buffers. + * This function also allocates and initializes both the transmitter + * and receive hardware descriptors. */ static int amd8111e_init_ring(struct net_device *dev) { @@ -376,7 +371,10 @@ err_free_tx_ring: err_no_mem: return -ENOMEM; } -/* This function will set the interrupt coalescing according to the input arguments */ + +/* This function will set the interrupt coalescing according + * to the input arguments + */ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod) { unsigned int timeout; @@ -435,9 +433,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod) } -/* -This function initializes the device registers and starts the device. -*/ +/* This function initializes the device registers and starts the device. 
*/ static int amd8111e_restart(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -513,9 +509,8 @@ static int amd8111e_restart(struct net_device *dev) readl(mmio+CMD0); return 0; } -/* -This function clears necessary the device registers. -*/ + +/* This function clears necessary the device registers. */ static void amd8111e_init_hw_default( struct amd8111e_priv* lp) { unsigned int reg_val; @@ -604,9 +599,8 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp) } -/* -This function disables the interrupt and clears all the pending -interrupts in INT0 +/* This function disables the interrupt and clears all the pending + * interrupts in INT0 */ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp) { @@ -624,9 +618,7 @@ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp) } -/* -This function stops the chip. -*/ +/* This function stops the chip. */ static void amd8111e_stop_chip(struct amd8111e_priv* lp) { writel(RUN, lp->mmio + CMD0); @@ -635,9 +627,7 @@ static void amd8111e_stop_chip(struct amd8111e_priv* lp) readl(lp->mmio + CMD0); } -/* -This function frees the transmiter and receiver descriptor rings. -*/ +/* This function frees the transmiter and receiver descriptor rings. */ static void amd8111e_free_ring(struct amd8111e_priv* lp) { /* Free transmit and receive descriptor rings */ @@ -658,9 +648,10 @@ static void amd8111e_free_ring(struct amd8111e_priv* lp) } -/* -This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb. -*/ +/* This function will free all the transmit skbs that are actually + * transmitted by the device. It will check the ownership of the + * skb before freeing the skb. + */ static int amd8111e_tx(struct net_device *dev) { struct amd8111e_priv* lp = netdev_priv(dev); @@ -723,21 +714,20 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) goto rx_not_empty; do{ - /* process receive packets until we use the quota*/ - /* If we own the next entry, it's a new packet. Send it up. */ + /* process receive packets until we use the quota. + * If we own the next entry, it's a new packet. Send it up. + */ while(1) { status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); if (status & OWN_BIT) break; - /* - * There is a tricky error noted by John Murphy, + /* There is a tricky error noted by John Murphy, * to Russ Nelson: Even with * full-sized * buffers it's possible for a * jabber packet to use two buffers, with only * the last correctly noting the error. */ - if(status & ERR_BIT) { /* reseting flags */ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; @@ -770,7 +760,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); if (!new_skb) { /* if allocation fail, - ignore that pkt and go to next one */ + * ignore that pkt and go to next one + */ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; lp->drv_rx_errors++; goto err_next_pkt; @@ -811,8 +802,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; } /* Check the interrupt status register for more packets in the - mean time. Process them since we have not used up our quota.*/ - + * mean time. Process them since we have not used up our quota. 
+ */ intr0 = readl(mmio + INT0); /*Ack receive packets */ writel(intr0 & RINT0,mmio + INT0); @@ -832,9 +823,7 @@ rx_not_empty: return num_rx_pkt; } -/* -This function will indicate the link status to the kernel. -*/ +/* This function will indicate the link status to the kernel. */ static int amd8111e_link_change(struct net_device* dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -877,9 +866,8 @@ static int amd8111e_link_change(struct net_device* dev) return 0; } -/* -This function reads the mib counters. -*/ + +/* This function reads the mib counters. */ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) { unsigned int status; @@ -897,8 +885,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) return data; } -/* - * This function reads the mib registers and returns the hardware statistics. +/* This function reads the mib registers and returns the hardware statistics. * It updates previous internal driver statistics with new values. */ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) @@ -994,9 +981,10 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) return new_stats; } + /* This function recalculate the interrupt coalescing mode on every interrupt -according to the datarate and the packet rate. -*/ + * according to the datarate and the packet rate. + */ static int amd8111e_calc_coalesce(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -1128,9 +1116,10 @@ static int amd8111e_calc_coalesce(struct net_device *dev) return 0; } -/* -This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts. -*/ + +/* This is device interrupt function. It handles transmit, + * receive,link change and hardware timer interrupts. + */ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) { @@ -1207,9 +1196,10 @@ static void amd8111e_poll(struct net_device *dev) #endif -/* -This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down. -*/ +/* This function closes the network interface and updates + * the statistics so that most recent statistics will be + * available after the interface is down. + */ static int amd8111e_close(struct net_device * dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -1240,8 +1230,10 @@ static int amd8111e_close(struct net_device * dev) lp->opened = 0; return 0; } -/* This function opens new interface.It requests irq for the device, initializes the device,buffers and descriptors, and starts the device. -*/ + +/* This function opens new interface.It requests irq for the device, + * initializes the device,buffers and descriptors, and starts the device. + */ static int amd8111e_open(struct net_device * dev ) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -1277,9 +1269,10 @@ static int amd8111e_open(struct net_device * dev ) return 0; } -/* -This function checks if there is any transmit descriptors available to queue more packet. -*/ + +/* This function checks if there is any transmit descriptors + * available to queue more packet. + */ static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) { int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; @@ -1289,10 +1282,12 @@ static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) return 0; } -/* -This function will queue the transmit packets to the descriptors and will trigger the send operation. 
It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc. -*/ +/* This function will queue the transmit packets to the + * descriptors and will trigger the send operation. It also + * initializes the transmit descriptors with buffer physical address, + * byte count, ownership to hardware etc. + */ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev) { @@ -1340,9 +1335,7 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } -/* -This function returns all the memory mapped registers of the device. -*/ +/* This function returns all the memory mapped registers of the device. */ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf) { void __iomem *mmio = lp->mmio; @@ -1363,10 +1356,9 @@ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf) } -/* -This function sets promiscuos mode, all-multi mode or the multicast address -list to the device. -*/ +/* This function sets promiscuos mode, all-multi mode or the multicast address + * list to the device. + */ static void amd8111e_set_multicast_list(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -1503,10 +1495,10 @@ static const struct ethtool_ops ops = { .set_wol = amd8111e_set_wol, }; -/* -This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application. -*/ - +/* This function handles all the ethtool ioctls. It gives driver info, + * gets/sets driver speed, gets memory mapped register values, forces + * auto negotiation, sets/gets WOL options for ethtool application. + */ static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); @@ -1561,9 +1553,9 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) return 0; } -/* -This function changes the mtu of the device. It restarts the device to initialize the descriptor with new receive buffers. -*/ +/* This function changes the mtu of the device. It restarts the device to + * initialize the descriptor with new receive buffers. + */ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -1574,7 +1566,8 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) if (!netif_running(dev)) { /* new_mtu will be used - when device starts netxt time */ + * when device starts netxt time + */ dev->mtu = new_mtu; return 0; } @@ -1614,8 +1607,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv* lp) return 0; } -/* - * This function is called when a packet transmission fails to complete +/* This function is called when a packet transmission fails to complete * within a reasonable period, on the assumption that an interrupt have * failed or the interface is locked up. This function will reinitialize * the hardware. 
@@ -1925,7 +1917,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev, } /* display driver and device information */ - chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS); dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", -- cgit v1.2.1 From ba69a3d78e4f51e65933a86b8b107c86709bb2f5 Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Mon, 14 Jul 2014 14:09:08 +0530 Subject: ethernet: amd: fix pci device ids Normally any device ids will be above the corresponding device driver structure. This patch moves the pci device ids and MODULE_DEVICE_TABLE() above the pci driver structure. Signed-off-by: Varka Bhadram Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index a8f5c496c6cf..9f8a15e3b8bb 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -101,7 +101,6 @@ Revision History: MODULE_AUTHOR("Advanced Micro Devices, Inc."); MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS); MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); module_param_array(speed_duplex, int, NULL, 0); MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); module_param_array(coalesce, bool, NULL, 0); @@ -109,14 +108,6 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0 module_param_array(dynamic_ipg, bool, NULL, 0); MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); -static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = { - - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { 0, } - -}; - /* This function will read the PHY registers. */ static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val) { @@ -1953,6 +1944,17 @@ static void amd8111e_remove_one(struct pci_dev *pdev) } } +static const struct pci_device_id amd8111e_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD8111E_7462, + }, + { + .vendor = 0, + } +}; +MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); + static struct pci_driver amd8111e_driver = { .name = MODULE_NAME, .id_table = amd8111e_pci_tbl, -- cgit v1.2.1 From 46c73ecc6168586c7628b87ac1d0436eca9574dd Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Mon, 14 Jul 2014 14:09:09 +0530 Subject: ethernet: amd: fix 'foo* bar' This patch fix the 'foo*' bar with 'foo *bar' and (foo*) with (foo *). Signed-off-by: Varka Bhadram Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 76 +++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9f8a15e3b8bb..841e6558db68 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -109,7 +109,8 @@ module_param_array(dynamic_ipg, bool, NULL, 0); MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); /* This function will read the PHY registers. 
*/ -static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val) +static int amd8111e_read_phy(struct amd8111e_priv *lp, + int phy_id, int reg, u32 *val) { void __iomem *mmio = lp->mmio; unsigned int reg_val; @@ -137,7 +138,8 @@ err_phy_read: } /* This function will write into PHY registers. */ -static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val) +static int amd8111e_write_phy(struct amd8111e_priv *lp, + int phy_id, int reg, u32 val) { unsigned int repeat = REPEAT_CNT; void __iomem *mmio = lp->mmio; @@ -166,9 +168,9 @@ err_phy_write: } /* This is the mii register read function provided to the mii interface. */ -static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num) +static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num) { - struct amd8111e_priv* lp = netdev_priv(dev); + struct amd8111e_priv *lp = netdev_priv(dev); unsigned int reg_val; amd8111e_read_phy(lp,phy_id,reg_num,®_val); @@ -177,9 +179,10 @@ static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num) } /* This is the mii register write function provided to the mii interface. */ -static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val) +static void amd8111e_mdio_write(struct net_device *dev, + int phy_id, int reg_num, int val) { - struct amd8111e_priv* lp = netdev_priv(dev); + struct amd8111e_priv *lp = netdev_priv(dev); amd8111e_write_phy(lp, phy_id, reg_num, val); } @@ -231,7 +234,7 @@ static void amd8111e_set_ext_phy(struct net_device *dev) static int amd8111e_free_skbs(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - struct sk_buff* rx_skbuff; + struct sk_buff *rx_skbuff; int i; /* Freeing transmit skbs */ @@ -261,9 +264,9 @@ static int amd8111e_free_skbs(struct net_device *dev) /* This will set the receive buffer length corresponding * to the mtu size of networkinterface. */ -static inline void amd8111e_set_rx_buff_len(struct net_device* dev) +static inline void amd8111e_set_rx_buff_len(struct net_device *dev) { - struct amd8111e_priv* lp = netdev_priv(dev); + struct amd8111e_priv *lp = netdev_priv(dev); unsigned int mtu = dev->mtu; if (mtu > ETH_DATA_LEN){ @@ -366,14 +369,14 @@ err_no_mem: /* This function will set the interrupt coalescing according * to the input arguments */ -static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod) +static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod) { unsigned int timeout; unsigned int event_count; struct amd8111e_priv *lp = netdev_priv(dev); void __iomem *mmio = lp->mmio; - struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf; + struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf; switch(cmod) @@ -502,7 +505,7 @@ static int amd8111e_restart(struct net_device *dev) } /* This function clears necessary the device registers. 
*/ -static void amd8111e_init_hw_default( struct amd8111e_priv* lp) +static void amd8111e_init_hw_default(struct amd8111e_priv *lp) { unsigned int reg_val; unsigned int logic_filter[2] ={0,}; @@ -572,7 +575,7 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp) writew(MIB_CLEAR, mmio + MIB_ADDR); /* Clear LARF */ - amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF); + amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF); /* SRAM_SIZE register */ reg_val = readl(mmio + SRAM_SIZE); @@ -593,7 +596,7 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp) /* This function disables the interrupt and clears all the pending * interrupts in INT0 */ -static void amd8111e_disable_interrupt(struct amd8111e_priv* lp) +static void amd8111e_disable_interrupt(struct amd8111e_priv *lp) { u32 intr0; @@ -610,7 +613,7 @@ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp) } /* This function stops the chip. */ -static void amd8111e_stop_chip(struct amd8111e_priv* lp) +static void amd8111e_stop_chip(struct amd8111e_priv *lp) { writel(RUN, lp->mmio + CMD0); @@ -619,7 +622,7 @@ static void amd8111e_stop_chip(struct amd8111e_priv* lp) } /* This function frees the transmiter and receiver descriptor rings. */ -static void amd8111e_free_ring(struct amd8111e_priv* lp) +static void amd8111e_free_ring(struct amd8111e_priv *lp) { /* Free transmit and receive descriptor rings */ if(lp->rx_ring){ @@ -645,7 +648,7 @@ static void amd8111e_free_ring(struct amd8111e_priv* lp) */ static int amd8111e_tx(struct net_device *dev) { - struct amd8111e_priv* lp = netdev_priv(dev); + struct amd8111e_priv *lp = netdev_priv(dev); int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; int status; /* Complete all the transmit packet */ @@ -815,7 +818,7 @@ rx_not_empty: } /* This function will indicate the link status to the kernel. */ -static int amd8111e_link_change(struct net_device* dev) +static int amd8111e_link_change(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); int status0,speed; @@ -979,7 +982,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) static int amd8111e_calc_coalesce(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf; + struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf; int tx_pkt_rate; int rx_pkt_rate; int tx_data_rate; @@ -1114,7 +1117,7 @@ static int amd8111e_calc_coalesce(struct net_device *dev) static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) { - struct net_device * dev = (struct net_device *) dev_id; + struct net_device *dev = (struct net_device *)dev_id; struct amd8111e_priv *lp = netdev_priv(dev); void __iomem *mmio = lp->mmio; unsigned int intr0, intren0; @@ -1191,7 +1194,7 @@ static void amd8111e_poll(struct net_device *dev) * the statistics so that most recent statistics will be * available after the interface is down. */ -static int amd8111e_close(struct net_device * dev) +static int amd8111e_close(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); netif_stop_queue(dev); @@ -1225,7 +1228,7 @@ static int amd8111e_close(struct net_device * dev) /* This function opens new interface.It requests irq for the device, * initializes the device,buffers and descriptors, and starts the device. 
*/ -static int amd8111e_open(struct net_device * dev ) +static int amd8111e_open(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); @@ -1264,7 +1267,7 @@ static int amd8111e_open(struct net_device * dev ) /* This function checks if there is any transmit descriptors * available to queue more packet. */ -static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) +static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp) { int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; if (lp->tx_skbuff[tx_index]) @@ -1280,7 +1283,7 @@ static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) * byte count, ownership to hardware etc. */ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, - struct net_device * dev) + struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); int tx_index; @@ -1368,14 +1371,14 @@ static void amd8111e_set_multicast_list(struct net_device *dev) /* get all multicast packet */ mc_filter[1] = mc_filter[0] = 0xffffffff; lp->options |= OPTION_MULTICAST_ENABLE; - amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); + amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF); return; } if (netdev_mc_empty(dev)) { /* get only own packets */ mc_filter[1] = mc_filter[0] = 0; lp->options &= ~OPTION_MULTICAST_ENABLE; - amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); + amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF); /* disable promiscuous mode */ writel(PROM, lp->mmio + CMD2); return; @@ -1387,14 +1390,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev) bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f; mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); } - amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); + amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF); /* To eliminate PCI posting bug */ readl(lp->mmio + CMD2); } -static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info) +static void amd8111e_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { struct amd8111e_priv *lp = netdev_priv(dev); struct pci_dev *pci_dev = lp->pci_dev; @@ -1490,7 +1494,7 @@ static const struct ethtool_ops ops = { * gets/sets driver speed, gets memory mapped register values, forces * auto negotiation, sets/gets WOL options for ethtool application. 
*/ -static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd) +static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct amd8111e_priv *lp = netdev_priv(dev); @@ -1577,7 +1581,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) return err; } -static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp) +static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp) { writel( VAL1|MPPLBA, lp->mmio + CMD3); writel( VAL0|MPEN_SW, lp->mmio + CMD7); @@ -1587,7 +1591,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp) return 0; } -static int amd8111e_enable_link_change(struct amd8111e_priv* lp) +static int amd8111e_enable_link_change(struct amd8111e_priv *lp) { /* Adapter is already stoped/suspended/interrupt-disabled */ @@ -1605,7 +1609,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv* lp) */ static void amd8111e_tx_timeout(struct net_device *dev) { - struct amd8111e_priv* lp = netdev_priv(dev); + struct amd8111e_priv *lp = netdev_priv(dev); int err; netdev_err(dev, "transmit timed out, resetting\n"); @@ -1686,10 +1690,10 @@ static int amd8111e_resume(struct pci_dev *pci_dev) return 0; } -static void amd8111e_config_ipg(struct net_device* dev) +static void amd8111e_config_ipg(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - struct ipg_info* ipg_data = &lp->ipg_data; + struct ipg_info *ipg_data = &lp->ipg_data; void __iomem *mmio = lp->mmio; unsigned int prev_col_cnt = ipg_data->col_cnt; unsigned int total_col_cnt; @@ -1787,8 +1791,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev, { int err, i; unsigned long reg_addr,reg_len; - struct amd8111e_priv* lp; - struct net_device* dev; + struct amd8111e_priv *lp; + struct net_device *dev; err = pci_enable_device(pdev); if(err){ -- cgit v1.2.1 From 6964e97051146c2dcac99065e0c0367cf76c829a Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Mon, 14 Jul 2014 14:05:52 -0500 Subject: amd-xgbe: Remove the adjustments needed for fixed speed With the addition of entries in the phy speed/duplex settings array to support KR and KX mode, the work-around to add/remove baseT settings to run at a fixed speed is no longer needed. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 10 ---------- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 4 ---- 2 files changed, 14 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 8909f2b51af1..f7405261f23e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -331,16 +331,6 @@ static int xgbe_set_settings(struct net_device *netdev, (cmd->duplex != DUPLEX_FULL))) goto unlock; - if (cmd->autoneg == AUTONEG_ENABLE) { - /* Clear settings needed to force speeds */ - phydev->supported &= ~SUPPORTED_1000baseT_Full; - phydev->supported &= ~SUPPORTED_10000baseT_Full; - } else { - /* Add settings needed to force speed */ - phydev->supported |= SUPPORTED_1000baseT_Full; - phydev->supported |= SUPPORTED_10000baseT_Full; - } - cmd->advertising &= phydev->supported; if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) goto unlock; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index ea7a5d6750ea..225f22d5fe0a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -375,10 +375,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata) phydev->autoneg = pdata->default_autoneg; if (phydev->autoneg == AUTONEG_DISABLE) { - /* Add settings needed to force speed */ - phydev->supported |= SUPPORTED_1000baseT_Full; - phydev->supported |= SUPPORTED_10000baseT_Full; - phydev->speed = pdata->default_speed; phydev->duplex = DUPLEX_FULL; -- cgit v1.2.1 From 23e4eef7cf56b5e36e76af9078f0012826c86b2f Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 29 Jul 2014 08:57:19 -0500 Subject: amd-xgbe: Add hardware timestamp support This patch adds support for Tx and Rx hardware timestamping. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/Kconfig | 1 + drivers/net/ethernet/amd/xgbe/Makefile | 3 +- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 71 +++++ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 9 + drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 143 +++++++++- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 372 ++++++++++++++++++++++++--- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 35 +++ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 21 +- drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 285 ++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe.h | 56 +++- 10 files changed, 944 insertions(+), 52 deletions(-) create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 6e314dbba805..98ef8ff8fa08 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -184,6 +184,7 @@ config AMD_XGBE select AMD_XGBE_PHY select BITREVERSE select CRC32 + select PTP_1588_CLOCK ---help--- This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. 
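[Editor's note, not part of the patch series: the hardware timestamping added here is exposed through the standard Linux SIOCSHWTSTAMP interface rather than a driver-private ioctl. The sketch below shows how a userspace application might enable Tx and Rx hardware timestamps on the resulting netdev; the interface name "eth0" and the HWTSTAMP_FILTER_ALL choice are illustrative assumptions, not values taken from the patch.]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Request Tx timestamps and timestamping of all Rx packets */
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is illustrative */
	ifr.ifr_data = (char *)&cfg;

	/* Hand the request to the driver; it may adjust cfg to what it supports */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}

[Tx timestamps are then read back from the socket error queue via SO_TIMESTAMPING/MSG_ERRQUEUE, correlated against the PTP clock that the new xgbe-ptp.c registers.]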
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile index 26cf9af1642f..66c49b43e5f2 100644 --- a/drivers/net/ethernet/amd/xgbe/Makefile +++ b/drivers/net/ethernet/amd/xgbe/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \ - xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o + xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \ + xgbe-ptp.o amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 7ec80ac7043f..646702cd75fb 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -307,6 +307,16 @@ #define MAC_MACA0LR 0x0304 #define MAC_MACA1HR 0x0308 #define MAC_MACA1LR 0x030c +#define MAC_TSCR 0x0d00 +#define MAC_SSIR 0x0d04 +#define MAC_STSR 0x0d08 +#define MAC_STNR 0x0d0c +#define MAC_STSUR 0x0d10 +#define MAC_STNUR 0x0d14 +#define MAC_TSAR 0x0d18 +#define MAC_TSSR 0x0d20 +#define MAC_TXSNR 0x0d30 +#define MAC_TXSSR 0x0d34 #define MAC_QTFCR_INC 4 #define MAC_MACA_INC 4 @@ -373,12 +383,16 @@ #define MAC_HWF2R_TXCHCNT_WIDTH 4 #define MAC_HWF2R_TXQCNT_INDEX 6 #define MAC_HWF2R_TXQCNT_WIDTH 4 +#define MAC_IER_TSIE_INDEX 12 +#define MAC_IER_TSIE_WIDTH 1 #define MAC_ISR_MMCRXIS_INDEX 9 #define MAC_ISR_MMCRXIS_WIDTH 1 #define MAC_ISR_MMCTXIS_INDEX 10 #define MAC_ISR_MMCTXIS_WIDTH 1 #define MAC_ISR_PMTIS_INDEX 4 #define MAC_ISR_PMTIS_WIDTH 1 +#define MAC_ISR_TSIS_INDEX 12 +#define MAC_ISR_TSIS_WIDTH 1 #define MAC_MACA1HR_AE_INDEX 31 #define MAC_MACA1HR_AE_WIDTH 1 #define MAC_PFR_HMC_INDEX 2 @@ -423,10 +437,48 @@ #define MAC_RFCR_RFE_WIDTH 1 #define MAC_RQC0R_RXQ0EN_INDEX 0 #define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define MAC_SSIR_SNSINC_INDEX 8 +#define MAC_SSIR_SNSINC_WIDTH 8 +#define MAC_SSIR_SSINC_INDEX 16 +#define MAC_SSIR_SSINC_WIDTH 8 #define MAC_TCR_SS_INDEX 29 #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 +#define MAC_TSCR_AV8021ASMEN_INDEX 28 +#define MAC_TSCR_AV8021ASMEN_WIDTH 1 +#define MAC_TSCR_SNAPTYPSEL_INDEX 16 +#define MAC_TSCR_SNAPTYPSEL_WIDTH 2 +#define MAC_TSCR_TSADDREG_INDEX 5 +#define MAC_TSCR_TSADDREG_WIDTH 1 +#define MAC_TSCR_TSCFUPDT_INDEX 1 +#define MAC_TSCR_TSCFUPDT_WIDTH 1 +#define MAC_TSCR_TSCTRLSSR_INDEX 9 +#define MAC_TSCR_TSCTRLSSR_WIDTH 1 +#define MAC_TSCR_TSENA_INDEX 0 +#define MAC_TSCR_TSENA_WIDTH 1 +#define MAC_TSCR_TSENALL_INDEX 8 +#define MAC_TSCR_TSENALL_WIDTH 1 +#define MAC_TSCR_TSEVNTENA_INDEX 14 +#define MAC_TSCR_TSEVNTENA_WIDTH 1 +#define MAC_TSCR_TSINIT_INDEX 2 +#define MAC_TSCR_TSINIT_WIDTH 1 +#define MAC_TSCR_TSIPENA_INDEX 11 +#define MAC_TSCR_TSIPENA_WIDTH 1 +#define MAC_TSCR_TSIPV4ENA_INDEX 13 +#define MAC_TSCR_TSIPV4ENA_WIDTH 1 +#define MAC_TSCR_TSIPV6ENA_INDEX 12 +#define MAC_TSCR_TSIPV6ENA_WIDTH 1 +#define MAC_TSCR_TSMSTRENA_INDEX 15 +#define MAC_TSCR_TSMSTRENA_WIDTH 1 +#define MAC_TSCR_TSVER2ENA_INDEX 10 +#define MAC_TSCR_TSVER2ENA_WIDTH 1 +#define MAC_TSCR_TXTSSTSM_INDEX 24 +#define MAC_TSCR_TXTSSTSM_WIDTH 1 +#define MAC_TSSR_TXTSC_INDEX 15 +#define MAC_TSSR_TXTSC_WIDTH 1 +#define MAC_TXSNR_TXTSSTSMIS_INDEX 31 +#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 #define MAC_VLANHTR_VLHT_INDEX 0 #define MAC_VLANHTR_VLHT_WIDTH 16 #define MAC_VLANIR_VLTI_INDEX 20 @@ -778,9 +830,19 @@ #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 #define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 +#define 
RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC3_CDA_INDEX 27 +#define RX_NORMAL_DESC3_CDA_WIDTH 1 +#define RX_NORMAL_DESC3_CTXT_INDEX 30 +#define RX_NORMAL_DESC3_CTXT_WIDTH 1 #define RX_NORMAL_DESC3_ES_INDEX 15 #define RX_NORMAL_DESC3_ES_WIDTH 1 #define RX_NORMAL_DESC3_ETLT_INDEX 16 @@ -794,12 +856,19 @@ #define RX_NORMAL_DESC3_PL_INDEX 0 #define RX_NORMAL_DESC3_PL_WIDTH 14 +#define RX_CONTEXT_DESC3_TSA_INDEX 4 +#define RX_CONTEXT_DESC3_TSA_WIDTH 1 +#define RX_CONTEXT_DESC3_TSD_INDEX 6 +#define RX_CONTEXT_DESC3_TSD_WIDTH 1 + #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 +#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 #define TX_CONTEXT_DESC2_MSS_INDEX 0 #define TX_CONTEXT_DESC2_MSS_WIDTH 15 @@ -816,6 +885,8 @@ #define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 #define TX_NORMAL_DESC2_IC_INDEX 31 #define TX_NORMAL_DESC2_IC_WIDTH 1 +#define TX_NORMAL_DESC2_TTSE_INDEX 30 +#define TX_NORMAL_DESC2_TTSE_WIDTH 1 #define TX_NORMAL_DESC2_VTIR_INDEX 14 #define TX_NORMAL_DESC2_VTIR_WIDTH 2 #define TX_NORMAL_DESC3_CIC_INDEX 16 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index a9ce56d5e988..1c5d62e8dab6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -359,6 +359,15 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata, rdata->len = 0; rdata->interrupt = 0; rdata->mapped_as_page = 0; + + if (rdata->state_saved) { + rdata->state_saved = 0; + rdata->state.incomplete = 0; + rdata->state.context_next = 0; + rdata->state.skb = NULL; + rdata->state.len = 0; + rdata->state.error = 0; + } } static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 699cff5d3184..098a7461db2d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -131,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, DBGPR("-->xgbe_usec_to_riwt\n"); - rate = clk_get_rate(pdata->sysclock); + rate = clk_get_rate(pdata->sysclk); /* * Convert the input usec value to the watchdog timer value. Each @@ -154,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, DBGPR("-->xgbe_riwt_to_usec\n"); - rate = clk_get_rate(pdata->sysclock); + rate = clk_get_rate(pdata->sysclk); /* * Convert the input watchdog timer value to the usec value. 
Each @@ -492,8 +492,12 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) { - /* No MAC interrupts to be enabled */ - XGMAC_IOWRITE(pdata, MAC_IER, 0); + unsigned int mac_ier = 0; + + /* Enable Timestamp interrupt */ + XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); + + XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); /* Enable all counter interrupts */ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); @@ -1012,6 +1016,107 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) DBGPR("<--rx_desc_init\n"); } +static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, + unsigned int addend) +{ + /* Set the addend register value and tell the device */ + XGMAC_IOWRITE(pdata, MAC_TSAR, addend); + XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); + + /* Wait for addend update to complete */ + while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) + udelay(5); +} + +static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, + unsigned int nsec) +{ + /* Set the time values and tell the device */ + XGMAC_IOWRITE(pdata, MAC_STSUR, sec); + XGMAC_IOWRITE(pdata, MAC_STNUR, nsec); + XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); + + /* Wait for time update to complete */ + while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) + udelay(5); +} + +static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata) +{ + u64 nsec; + + nsec = XGMAC_IOREAD(pdata, MAC_STSR); + nsec *= NSEC_PER_SEC; + nsec += XGMAC_IOREAD(pdata, MAC_STNR); + + return nsec; +} + +static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata) +{ + unsigned int tx_snr; + u64 nsec; + + tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); + if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) + return 0; + + nsec = XGMAC_IOREAD(pdata, MAC_TXSSR); + nsec *= NSEC_PER_SEC; + nsec += tx_snr; + + return nsec; +} + +static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet, + struct xgbe_ring_desc *rdesc) +{ + u64 nsec; + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) && + !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) { + nsec = le32_to_cpu(rdesc->desc1); + nsec <<= 32; + nsec |= le32_to_cpu(rdesc->desc0); + if (nsec != 0xffffffffffffffffULL) { + packet->rx_tstamp = nsec; + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + RX_TSTAMP, 1); + } + } +} + +static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, + unsigned int mac_tscr) +{ + /* Set one nano-second accuracy */ + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); + + /* Set fine timestamp update */ + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); + + /* Overwrite earlier timestamps */ + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); + + XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); + + /* Exit if timestamping is not enabled */ + if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) + return 0; + + /* Initialize time registers */ + XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC); + XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC); + xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); + xgbe_set_tstamp_time(pdata, 0, 0); + + /* Initialize the timecounter */ + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + static void xgbe_pre_xmit(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -1110,6 +1215,10 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, TX_NORMAL_DESC2_VLAN_INSERT); 
+ /* Timestamp enablement check */ + if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); + /* Set IC bit based on Tx coalescing settings */ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); if (tx_coalesce && (!tx_frames || @@ -1245,6 +1354,25 @@ static int xgbe_dev_read(struct xgbe_channel *channel) xgbe_dump_rx_desc(ring, rdesc, ring->cur); #endif + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { + /* Timestamp Context Descriptor */ + xgbe_get_rx_tstamp(packet, rdesc); + + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT, 1); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT, 0); + return 0; + } + + /* Normal Descriptor, be sure Context Descriptor bit is off */ + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); + + /* Indicate if a Context Descriptor is next */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT, 1); + /* Get the packet length */ rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); @@ -2313,5 +2441,12 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->rx_mmc_int = xgbe_rx_mmc_int; hw_if->read_mmc_stats = xgbe_read_mmc_stats; + /* For PTP config */ + hw_if->config_tstamp = xgbe_config_tstamp; + hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; + hw_if->set_tstamp_time = xgbe_set_tstamp_time; + hw_if->get_tstamp_time = xgbe_get_tstamp_time; + hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 344e6b19ec0e..47a1e25b5609 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -121,6 +121,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -202,7 +203,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel; unsigned int dma_isr, dma_ch_isr; - unsigned int mac_isr; + unsigned int mac_isr, mac_tssr; unsigned int i; /* The DMA interrupt status register also reports MAC and MTL @@ -255,6 +256,17 @@ static irqreturn_t xgbe_isr(int irq, void *data) if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) hw_if->rx_mmc_int(pdata); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) { + mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR); + + if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) { + /* Read Tx Timestamp to clear interrupt */ + pdata->tx_tstamp = + hw_if->get_tx_tstamp(pdata); + schedule_work(&pdata->tx_tstamp_work); + } + } } DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); @@ -668,6 +680,197 @@ static void xgbe_restart(struct work_struct *work) rtnl_unlock(); } +static void xgbe_tx_tstamp(struct work_struct *work) +{ + struct xgbe_prv_data *pdata = container_of(work, + struct xgbe_prv_data, + tx_tstamp_work); + struct skb_shared_hwtstamps hwtstamps; + u64 nsec; + unsigned long flags; + + if (pdata->tx_tstamp) { + nsec = timecounter_cyc2time(&pdata->tstamp_tc, + pdata->tx_tstamp); + + memset(&hwtstamps, 0, sizeof(hwtstamps)); + hwtstamps.hwtstamp = ns_to_ktime(nsec); + skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps); + } + + dev_kfree_skb_any(pdata->tx_tstamp_skb); + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + pdata->tx_tstamp_skb = NULL; + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); +} + +static int 
xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, + struct ifreq *ifreq) +{ + if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, + sizeof(pdata->tstamp_config))) + return -EFAULT; + + return 0; +} + +static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, + struct ifreq *ifreq) +{ + struct hwtstamp_config config; + unsigned int mac_tscr; + + if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) + return -EINVAL; + + mac_tscr = 0; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + + case HWTSTAMP_TX_ON: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + + case HWTSTAMP_FILTER_ALL: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2, UDP, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + /* PTP v1, UDP, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2, UDP, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + /* PTP v1, UDP, Sync packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2, UDP, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + /* PTP v1, UDP, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* 802.AS1, Ethernet, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* 802.AS1, Ethernet, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* 802.AS1, Ethernet, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2/802.AS1, any layer, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2/802.AS1, any layer, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + 
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2/802.AS1, any layer, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + default: + return -ERANGE; + } + + pdata->hw_if.config_tstamp(pdata, mac_tscr); + + memcpy(&pdata->tstamp_config, &config, sizeof(config)); + + return 0; +} + +static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, + struct sk_buff *skb, + struct xgbe_packet_data *packet) +{ + unsigned long flags; + + if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) { + spin_lock_irqsave(&pdata->tstamp_lock, flags); + if (pdata->tx_tstamp_skb) { + /* Another timestamp in progress, ignore this one */ + XGMAC_SET_BITS(packet->attributes, + TX_PACKET_ATTRIBUTES, PTP, 0); + } else { + pdata->tx_tstamp_skb = skb_get(skb); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + } + + if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) + skb_tx_timestamp(skb); +} + static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) { if (vlan_tx_tag_present(skb)) @@ -711,7 +914,8 @@ static int xgbe_is_tso(struct sk_buff *skb) return 1; } -static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb, +static void xgbe_packet_info(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring, struct sk_buff *skb, struct xgbe_packet_data *packet) { struct skb_frag_struct *frag; @@ -753,6 +957,11 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb, VLAN_CTAG, 1); } + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + PTP, 1); + for (len = skb_headlen(skb); len;) { packet->rdesc_count++; len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); @@ -776,26 +985,33 @@ static int xgbe_open(struct net_device *netdev) DBGPR("-->xgbe_open\n"); - /* Enable the clock */ - ret = clk_prepare_enable(pdata->sysclock); + /* Enable the clocks */ + ret = clk_prepare_enable(pdata->sysclk); if (ret) { - netdev_alert(netdev, "clk_prepare_enable failed\n"); + netdev_alert(netdev, "dma clk_prepare_enable failed\n"); return ret; } + ret = clk_prepare_enable(pdata->ptpclk); + if (ret) { + netdev_alert(netdev, "ptp clk_prepare_enable failed\n"); + goto err_sysclk; + } + /* Calculate the Rx buffer size before allocating rings */ ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); if (ret < 0) - goto err_clk; + goto err_ptpclk; pdata->rx_buf_size = ret; /* Allocate the ring descriptors and buffers */ ret = desc_if->alloc_ring_resources(pdata); if (ret) - goto err_clk; + goto err_ptpclk; - /* Initialize the device restart work struct */ + /* Initialize the device restart and Tx timestamp work struct */ INIT_WORK(&pdata->restart_work, xgbe_restart); + INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); /* Request interrupts */ ret = 
devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, @@ -824,8 +1040,11 @@ err_start: err_irq: desc_if->free_ring_resources(pdata); -err_clk: - clk_disable_unprepare(pdata->sysclock); +err_ptpclk: + clk_disable_unprepare(pdata->ptpclk); + +err_sysclk: + clk_disable_unprepare(pdata->sysclk); return ret; } @@ -853,8 +1072,9 @@ static int xgbe_close(struct net_device *netdev) pdata->irq_number = 0; } - /* Disable the clock */ - clk_disable_unprepare(pdata->sysclock); + /* Disable the clocks */ + clk_disable_unprepare(pdata->ptpclk); + clk_disable_unprepare(pdata->sysclk); DBGPR("<--xgbe_close\n"); @@ -890,7 +1110,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) /* Calculate preliminary packet info */ memset(packet, 0, sizeof(*packet)); - xgbe_packet_info(ring, skb, packet); + xgbe_packet_info(pdata, ring, skb, packet); /* Check that there are enough descriptors available */ if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) { @@ -914,6 +1134,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) goto tx_netdev_return; } + xgbe_prep_tx_tstamp(pdata, skb, packet); + /* Configure required descriptor fields for transmission */ hw_if->pre_xmit(channel); @@ -968,6 +1190,27 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr) return 0; } +static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + int ret; + + switch (cmd) { + case SIOCGHWTSTAMP: + ret = xgbe_get_hwtstamp_settings(pdata, ifreq); + break; + + case SIOCSHWTSTAMP: + ret = xgbe_set_hwtstamp_settings(pdata, ifreq); + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + static int xgbe_change_mtu(struct net_device *netdev, int mtu) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -1109,6 +1352,7 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_set_rx_mode = xgbe_set_rx_mode, .ndo_set_mac_address = xgbe_set_mac_address, .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = xgbe_ioctl, .ndo_change_mtu = xgbe_change_mtu, .ndo_get_stats64 = xgbe_get_stats64, .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, @@ -1202,8 +1446,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct xgbe_packet_data *packet; struct net_device *netdev = pdata->netdev; struct sk_buff *skb; - unsigned int incomplete, error; - unsigned int cur_len, put_len, max_len; + struct skb_shared_hwtstamps *hwtstamps; + unsigned int incomplete, error, context_next, context; + unsigned int len, put_len, max_len; int received = 0; DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); @@ -1212,22 +1457,33 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!ring) return 0; + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); packet = &ring->packet_data; while (received < budget) { DBGPR(" cur = %d\n", ring->cur); - /* Clear the packet data information */ - memset(packet, 0, sizeof(*packet)); - skb = NULL; - error = 0; - cur_len = 0; + /* First time in loop see if we need to restore state */ + if (!received && rdata->state_saved) { + incomplete = rdata->state.incomplete; + context_next = rdata->state.context_next; + skb = rdata->state.skb; + error = rdata->state.error; + len = rdata->state.len; + } else { + memset(packet, 0, sizeof(*packet)); + incomplete = 0; + context_next = 0; + skb = NULL; + error = 0; + len = 0; + } read_again: + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + if (ring->dirty > (XGBE_RX_DESC_CNT >> 3)) xgbe_rx_refresh(channel); - rdata = 
XGBE_GET_DESC_DATA(ring, ring->cur); - if (hw_if->dev_read(channel)) break; @@ -1242,9 +1498,15 @@ read_again: incomplete = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, INCOMPLETE); + context_next = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT); + context = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, + CONTEXT); /* Earlier error, just drain the remaining data */ - if (incomplete && error) + if ((incomplete || context_next) && error) goto read_again; if (error || packet->errors) { @@ -1254,30 +1516,37 @@ read_again: continue; } - put_len = rdata->len - cur_len; - if (skb) { - if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) { - DBGPR("pskb_expand_head error\n"); - if (incomplete) { - error = 1; - goto read_again; + if (!context) { + put_len = rdata->len - len; + if (skb) { + if (pskb_expand_head(skb, 0, put_len, + GFP_ATOMIC)) { + DBGPR("pskb_expand_head error\n"); + if (incomplete) { + error = 1; + goto read_again; + } + + dev_kfree_skb(skb); + continue; } - - dev_kfree_skb(skb); - continue; + memcpy(skb_tail_pointer(skb), rdata->skb->data, + put_len); + } else { + skb = rdata->skb; + rdata->skb = NULL; } - memcpy(skb_tail_pointer(skb), rdata->skb->data, - put_len); - } else { - skb = rdata->skb; - rdata->skb = NULL; + skb_put(skb, put_len); + len += put_len; } - skb_put(skb, put_len); - cur_len += put_len; - if (incomplete) + if (incomplete || context_next) goto read_again; + /* Stray Context Descriptor? */ + if (!skb) + continue; + /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && @@ -1304,6 +1573,16 @@ read_again: __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_ctag); + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, RX_TSTAMP)) { + u64 nsec; + + nsec = timecounter_cyc2time(&pdata->tstamp_tc, + packet->rx_tstamp); + hwtstamps = skb_hwtstamps(skb); + hwtstamps->hwtstamp = ns_to_ktime(nsec); + } + skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); @@ -1313,6 +1592,17 @@ read_again: napi_gro_receive(&pdata->napi, skb); } + /* Check if we need to save state before leaving */ + if (received && (incomplete || context_next)) { + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + rdata->state_saved = 1; + rdata->state.incomplete = incomplete; + rdata->state.context_next = context_next; + rdata->state.skb = skb; + rdata->state.len = len; + rdata->state.error = error; + } + DBGPR("<--xgbe_rx_poll: received = %d\n", received); return received; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index f7405261f23e..4961666fa55f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -116,6 +116,7 @@ #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -480,6 +481,39 @@ static int xgbe_set_coalesce(struct net_device *netdev, return 0; } +static int xgbe_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *ts_info) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (pdata->ptp_clock) + ts_info->phc_index = ptp_clock_index(pdata->ptp_clock); + else + ts_info->phc_index = -1; + + ts_info->tx_types = (1 << 
HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + static const struct ethtool_ops xgbe_ethtool_ops = { .get_settings = xgbe_get_settings, .set_settings = xgbe_set_settings, @@ -492,6 +526,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .get_strings = xgbe_get_strings, .get_ethtool_stats = xgbe_get_ethtool_stats, .get_sset_count = xgbe_get_sset_count, + .get_ts_info = xgbe_get_ts_info, }; struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index e56c3d45b30e..ca6a6af00f9f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -245,6 +245,7 @@ static int xgbe_probe(struct platform_device *pdev) spin_lock_init(&pdata->lock); mutex_init(&pdata->xpcs_mutex); + spin_lock_init(&pdata->tstamp_lock); /* Set and validate the number of descriptors for a ring */ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); @@ -265,10 +266,18 @@ static int xgbe_probe(struct platform_device *pdev) } /* Obtain the system clock setting */ - pdata->sysclock = devm_clk_get(dev, NULL); - if (IS_ERR(pdata->sysclock)) { - dev_err(dev, "devm_clk_get failed\n"); - ret = PTR_ERR(pdata->sysclock); + pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK); + if (IS_ERR(pdata->sysclk)) { + dev_err(dev, "dma devm_clk_get failed\n"); + ret = PTR_ERR(pdata->sysclk); + goto err_io; + } + + /* Obtain the PTP clock setting */ + pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK); + if (IS_ERR(pdata->ptpclk)) { + dev_err(dev, "ptp devm_clk_get failed\n"); + ret = PTR_ERR(pdata->ptpclk); goto err_io; } @@ -420,6 +429,8 @@ static int xgbe_probe(struct platform_device *pdev) goto err_reg_netdev; } + xgbe_ptp_register(pdata); + xgbe_debugfs_init(pdata); netdev_notice(netdev, "net device enabled\n"); @@ -452,6 +463,8 @@ static int xgbe_remove(struct platform_device *pdev) xgbe_debugfs_exit(pdata); + xgbe_ptp_unregister(pdata); + unregister_netdev(netdev); xgbe_mdio_unregister(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c new file mode 100644 index 000000000000..37e64cfa5718 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c @@ -0,0 +1,285 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + + +static cycle_t xgbe_cc_read(const struct cyclecounter *cc) +{ + struct xgbe_prv_data *pdata = container_of(cc, + struct xgbe_prv_data, + tstamp_cc); + u64 nsec; + + nsec = pdata->hw_if.get_tstamp_time(pdata); + + return nsec; +} + +static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 adjust; + u32 addend, diff; + unsigned int neg_adjust = 0; + + if (delta < 0) { + neg_adjust = 1; + delta = -delta; + } + + adjust = pdata->tstamp_addend; + adjust *= delta; + diff = div_u64(adjust, 1000000000UL); + + addend = (neg_adjust) ? 
pdata->tstamp_addend - diff : + pdata->tstamp_addend + diff; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + pdata->hw_if.update_tstamp_addend(pdata, addend); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return 0; +} + +static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + nsec = timecounter_read(&pdata->tstamp_tc); + + nsec += delta; + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return 0; +} + +static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + nsec = timecounter_read(&pdata->tstamp_tc); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + *ts = ns_to_timespec(nsec); + + return 0; +} + +static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + nsec = timespec_to_ns(ts); + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return 0; +} + +static int xgbe_enable(struct ptp_clock_info *info, + struct ptp_clock_request *request, int on) +{ + return -EOPNOTSUPP; +} + +void xgbe_ptp_register(struct xgbe_prv_data *pdata) +{ + struct ptp_clock_info *info = &pdata->ptp_clock_info; + struct ptp_clock *clock; + struct cyclecounter *cc = &pdata->tstamp_cc; + u64 dividend; + + snprintf(info->name, sizeof(info->name), "%s", + netdev_name(pdata->netdev)); + info->owner = THIS_MODULE; + info->max_adj = clk_get_rate(pdata->ptpclk); + info->adjfreq = xgbe_adjfreq; + info->adjtime = xgbe_adjtime; + info->gettime = xgbe_gettime; + info->settime = xgbe_settime; + info->enable = xgbe_enable; + + clock = ptp_clock_register(info, pdata->dev); + if (IS_ERR(clock)) { + dev_err(pdata->dev, "ptp_clock_register failed\n"); + return; + } + + pdata->ptp_clock = clock; + + /* Calculate the addend: + * addend = 2^32 / (PTP ref clock / 50Mhz) + * = (2^32 * 50Mhz) / PTP ref clock + */ + dividend = 50000000; + dividend <<= 32; + pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk)); + + /* Setup the timecounter */ + cc->read = xgbe_cc_read; + cc->mask = CLOCKSOURCE_MASK(64); + cc->mult = 1; + cc->shift = 0; + + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, + ktime_to_ns(ktime_get_real())); + + /* Disable all timestamping to start */ + XGMAC_IOWRITE(pdata, MAC_TCR, 0); + pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +} + +void xgbe_ptp_unregister(struct xgbe_prv_data *pdata) +{ + if (pdata->ptp_clock) + ptp_clock_unregister(pdata->ptp_clock); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 9e24b296e272..8b6ad3e82c34 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -123,6 +123,9 @@ #include #include #include +#include +#include +#include #define XGBE_DRV_NAME "amd-xgbe" @@ -164,6 +167,16 @@ #define XGBE_PHY_NAME "amd_xgbe_phy" #define XGBE_PRTAD 0 +/* Device-tree 
clock names */ +#define XGBE_DMA_CLOCK "dma_clk" +#define XGBE_PTP_CLOCK "ptp_clk" + +/* Timestamp support - values based on 50MHz PTP clock + * 50MHz => 20 nsec + */ +#define XGBE_TSTAMP_SSINC 20 +#define XGBE_TSTAMP_SNSINC 0 + /* Driver PMT macros */ #define XGMAC_DRIVER_CONTEXT 1 #define XGMAC_IOCTL_CONTEXT 2 @@ -214,6 +227,8 @@ struct xgbe_packet_data { unsigned short mss; unsigned short vlan_ctag; + + u64 rx_tstamp; }; /* Common Rx and Tx descriptor mapping */ @@ -242,6 +257,20 @@ struct xgbe_ring_data { unsigned int interrupt; /* Interrupt indicator */ unsigned int mapped_as_page; + + /* Incomplete receive save location. If the budget is exhausted + * or the last descriptor (last normal descriptor or a following + * context descriptor) has not been DMA'd yet the current state + * of the receive processing needs to be saved. + */ + unsigned int state_saved; + struct { + unsigned int incomplete; + unsigned int context_next; + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; }; struct xgbe_ring { @@ -467,6 +496,14 @@ struct xgbe_hw_if { void (*rx_mmc_int)(struct xgbe_prv_data *); void (*tx_mmc_int)(struct xgbe_prv_data *); void (*read_mmc_stats)(struct xgbe_prv_data *); + + /* For Timestamp config */ + int (*config_tstamp)(struct xgbe_prv_data *, unsigned int); + void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int); + void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec, + unsigned int nsec); + u64 (*get_tstamp_time)(struct xgbe_prv_data *); + u64 (*get_tx_tstamp)(struct xgbe_prv_data *); }; struct xgbe_desc_if { @@ -607,8 +644,21 @@ struct xgbe_prv_data { /* Filtering support */ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - /* System clock value used for Rx watchdog */ - struct clk *sysclock; + /* Device clocks */ + struct clk *sysclk; + struct clk *ptpclk; + + /* Timestamp support */ + spinlock_t tstamp_lock; + struct ptp_clock_info ptp_clock_info; + struct ptp_clock *ptp_clock; + struct hwtstamp_config tstamp_config; + struct cyclecounter tstamp_cc; + struct timecounter tstamp_tc; + unsigned int tstamp_addend; + struct work_struct tx_tstamp_work; + struct sk_buff *tx_tstamp_skb; + u64 tx_tstamp; /* Hardware features of the device */ struct xgbe_hw_features hw_feat; @@ -639,6 +689,8 @@ struct ethtool_ops *xgbe_get_ethtool_ops(void); int xgbe_mdio_register(struct xgbe_prv_data *); void xgbe_mdio_unregister(struct xgbe_prv_data *); void xgbe_dump_phy_registers(struct xgbe_prv_data *); +void xgbe_ptp_register(struct xgbe_prv_data *); +void xgbe_ptp_unregister(struct xgbe_prv_data *); void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int, unsigned int); void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *, -- cgit v1.2.1 From f047604a3ff1a1d7c8bd4a43c72de3936d71f3c1 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 29 Jul 2014 08:57:25 -0500 Subject: amd-xgbe: Update/fix 2.5GbE support Update the amd-xgbe driver and phylib driver to better support the 2.5GbE mode for the hardware. In order to be able establish 2.5GbE using clause 73 auto negotiation the device will support speed sets of 1GbE/10GbE and 2.5GbE/10GbE. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 4961666fa55f..6005b6021f78 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -327,10 +327,19 @@ static int xgbe_set_settings(struct net_device *netdev, (cmd->autoneg != AUTONEG_DISABLE)) goto unlock; - if ((cmd->autoneg == AUTONEG_DISABLE) && - (((speed != SPEED_10000) && (speed != SPEED_1000)) || - (cmd->duplex != DUPLEX_FULL))) - goto unlock; + if (cmd->autoneg == AUTONEG_DISABLE) { + switch (speed) { + case SPEED_10000: + case SPEED_2500: + case SPEED_1000: + break; + default: + goto unlock; + } + + if (cmd->duplex != DUPLEX_FULL) + goto unlock; + } cmd->advertising &= phydev->supported; if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) -- cgit v1.2.1 From 853eb16b8b6a347315443f2ef010e5b97d8c1577 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 29 Jul 2014 08:57:31 -0500 Subject: amd-xgbe: Base queue fifo size and enablement on ring count When setting the fifo sizes for the queues and enabling the queues use the number of active Tx and Rx queues that have been enabled not the maximum number available. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 42 +++++++++++++++---------------- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10 +++++++- drivers/net/ethernet/amd/xgbe/xgbe.h | 3 +++ 3 files changed, 33 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 098a7461db2d..2cdf34f6139f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -247,7 +247,7 @@ static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) + for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); return 0; @@ -257,7 +257,7 @@ static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); return 0; @@ -268,7 +268,7 @@ static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, { unsigned int i; - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) + for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); return 0; @@ -279,7 +279,7 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, { unsigned int i; - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); return 0; @@ -343,12 +343,12 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) unsigned int i; /* Clear MTL flow control */ - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) + for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); /* Clear MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count); + q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) 
{ reg_val = XGMAC_IOREAD(pdata, reg); @@ -368,12 +368,12 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) unsigned int i; /* Set MTL flow control */ - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) + for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count); + q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -1551,11 +1551,11 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) { unsigned int i, count; - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); /* Poll Until Poll Condition */ - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) { + for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, MTL_Q_TQOMR, FTQ)) @@ -1700,13 +1700,13 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) unsigned int i; fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, - pdata->hw_feat.tx_q_cnt); + pdata->tx_q_count); - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n", - pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256)); + pdata->tx_q_count, ((fifo_size + 1) * 256)); } static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) @@ -1715,19 +1715,19 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) unsigned int i; fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, - pdata->hw_feat.rx_q_cnt); + pdata->rx_q_count); - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) + for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n", - pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256)); + pdata->rx_q_count, ((fifo_size + 1) * 256)); } static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata) { unsigned int i, reg, reg_val; - unsigned int q_count = pdata->hw_feat.rx_q_cnt; + unsigned int q_count = pdata->rx_q_count; /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ reg = MTL_RQDCM0R; @@ -1749,7 +1749,7 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) { unsigned int i; - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) { + for (i = 0; i < pdata->rx_q_count; i++) { /* Activate flow control when less than 4k left in fifo */ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); @@ -2141,7 +2141,7 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata) } /* Enable each Tx queue */ - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, MTL_Q_ENABLED); @@ -2158,7 +2158,7 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); /* Disable each Tx queue */ - for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); /* Disable each Tx DMA channel */ @@ -2187,7 +2187,7 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata) /* Enable each Rx queue */ 
reg_val = 0; - for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) + for (i = 0; i < pdata->rx_q_count; i++) reg_val |= (0x02 << (i << 1)); XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index ca6a6af00f9f..04e6c72eb3c8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -355,9 +355,16 @@ static int xgbe_probe(struct platform_device *pdev) /* Set default configuration data */ xgbe_default_config(pdata); - /* Calculate the number of Tx and Rx rings to be created */ + /* Calculate the number of Tx and Rx rings to be created + * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set + * the number of Tx queues to the number of Tx channels + * enabled + * -Rx (DMA) Channels do not map 1-to-1 so use the actual + * number of Rx queues + */ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), pdata->hw_feat.tx_ch_cnt); + pdata->tx_q_count = pdata->tx_ring_count; ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); if (ret) { dev_err(dev, "error setting real tx queue count\n"); @@ -367,6 +374,7 @@ static int xgbe_probe(struct platform_device *pdev) pdata->rx_ring_count = min_t(unsigned int, netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt); + pdata->rx_q_count = pdata->hw_feat.rx_q_cnt; ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); if (ret) { dev_err(dev, "error setting real rx queue count\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 8b6ad3e82c34..f011d88d2211 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -590,6 +590,9 @@ struct xgbe_prv_data { unsigned int rx_ring_count; unsigned int rx_desc_count; + unsigned int tx_q_count; + unsigned int rx_q_count; + /* Tx/Rx common settings */ unsigned int pblx8; -- cgit v1.2.1 From fca2d99428473884e67ef8ea1586e58151ed6ac3 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 29 Jul 2014 08:57:55 -0500 Subject: amd-xgbe: Add traffic class support This patch adds support for traffic classes as well as support for Data Center Bridging interfaces related to traffic classes and priority flow control. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/Kconfig | 10 ++ drivers/net/ethernet/amd/xgbe/Makefile | 1 + drivers/net/ethernet/amd/xgbe/xgbe-common.h | 22 ++- drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 270 ++++++++++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 180 ++++++++++++++++--- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 29 +++ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 5 +- drivers/net/ethernet/amd/xgbe/xgbe.h | 18 +- 8 files changed, 508 insertions(+), 27 deletions(-) create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-dcb.c (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 98ef8ff8fa08..8319c99331b0 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -192,4 +192,14 @@ config AMD_XGBE To compile this driver as a module, choose M here: the module will be called amd-xgbe. +config AMD_XGBE_DCB + bool "Data Center Bridging (DCB) support" + default n + depends on AMD_XGBE && DCB + ---help--- + Say Y here to enable Data Center Bridging (DCB) support in the + driver. + + If unsure, say N. 
+ endif # NET_VENDOR_AMD diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile index 66c49b43e5f2..171a7e68048d 100644 --- a/drivers/net/ethernet/amd/xgbe/Makefile +++ b/drivers/net/ethernet/amd/xgbe/Makefile @@ -4,4 +4,5 @@ amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \ xgbe-ptp.o +amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 646702cd75fb..cc25a3a9e7cf 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -322,6 +322,9 @@ #define MAC_MACA_INC 4 #define MAC_HTR_INC 4 +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + /* MAC register entry bit positions and sizes */ #define MAC_HWF0R_ADDMACADRSEL_INDEX 18 #define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 @@ -361,6 +364,8 @@ #define MAC_HWF1R_HASHTBLSZ_WIDTH 3 #define MAC_HWF1R_L3L4FNUM_INDEX 27 #define MAC_HWF1R_L3L4FNUM_WIDTH 4 +#define MAC_HWF1R_NUMTC_INDEX 21 +#define MAC_HWF1R_NUMTC_WIDTH 3 #define MAC_HWF1R_RSSEN_INDEX 20 #define MAC_HWF1R_RSSEN_WIDTH 1 #define MAC_HWF1R_RXFIFOSIZE_INDEX 0 @@ -433,8 +438,12 @@ #define MAC_RCR_LM_WIDTH 1 #define MAC_RCR_RE_INDEX 0 #define MAC_RCR_RE_WIDTH 1 +#define MAC_RFCR_PFCE_INDEX 8 +#define MAC_RFCR_PFCE_WIDTH 1 #define MAC_RFCR_RFE_INDEX 0 #define MAC_RFCR_RFE_WIDTH 1 +#define MAC_RFCR_UP_INDEX 1 +#define MAC_RFCR_UP_WIDTH 1 #define MAC_RQC0R_RXQ0EN_INDEX 0 #define MAC_RQC0R_RXQ0EN_WIDTH 2 #define MAC_SSIR_SNSINC_INDEX 8 @@ -704,6 +713,8 @@ #define MTL_RQDCM_INC 4 #define MTL_RQDCM_Q_PER_REG 4 +#define MTL_TCPM_INC 4 +#define MTL_TCPM_TC_PER_REG 4 /* MTL register entry bit positions and sizes */ #define MTL_OMR_ETSALG_INDEX 5 @@ -722,9 +733,6 @@ #define MTL_Q_TQOMR 0x00 #define MTL_Q_TQUR 0x04 #define MTL_Q_TQDR 0x08 -#define MTL_Q_TCECR 0x10 -#define MTL_Q_TCESR 0x14 -#define MTL_Q_TCQWR 0x18 #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 #define MTL_Q_RQDR 0x4c @@ -732,8 +740,6 @@ #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ -#define MTL_Q_TCQWR_QW_INDEX 0 -#define MTL_Q_TCQWR_QW_WIDTH 21 #define MTL_Q_RQOMR_EHFC_INDEX 7 #define MTL_Q_RQOMR_EHFC_WIDTH 1 #define MTL_Q_RQOMR_RFA_INDEX 8 @@ -748,6 +754,8 @@ #define MTL_Q_RQOMR_RTC_WIDTH 2 #define MTL_Q_TQOMR_FTQ_INDEX 0 #define MTL_Q_TQOMR_FTQ_WIDTH 1 +#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 +#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 #define MTL_Q_TQOMR_TQS_INDEX 16 #define MTL_Q_TQOMR_TQS_WIDTH 10 #define MTL_Q_TQOMR_TSF_INDEX 1 @@ -794,10 +802,14 @@ #define MTL_TC_INC MTL_Q_INC #define MTL_TC_ETSCR 0x10 +#define MTL_TC_ETSSR 0x14 +#define MTL_TC_QWR 0x18 /* MTL traffic class register entry bit positions and sizes */ #define MTL_TC_ETSCR_TSA_INDEX 0 #define MTL_TC_ETSCR_TSA_WIDTH 2 +#define MTL_TC_QWR_QW_INDEX 0 +#define MTL_TC_QWR_QW_WIDTH 21 /* MTL traffic class register value */ #define MTL_TSA_SP 0x00 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c new file mode 100644 index 000000000000..7d6a49b24321 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -0,0 +1,270 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. 
+ * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + + +static int xgbe_dcb_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + /* Set number of supported traffic classes */ + ets->ets_cap = pdata->hw_feat.tc_cnt; + + if (pdata->ets) { + ets->cbs = pdata->ets->cbs; + memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, pdata->ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, pdata->ets->prio_tc, + sizeof(ets->prio_tc)); + } + + return 0; +} + +static int xgbe_dcb_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int i, tc_ets, tc_ets_weight; + + tc_ets = 0; + tc_ets_weight = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i, + ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]); + DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); + + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && + (i >= pdata->hw_feat.tc_cnt)) + return -EINVAL; + + if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt) + return -EINVAL; + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + tc_ets = 1; + tc_ets_weight += ets->tc_tx_bw[i]; + break; + + default: + return -EINVAL; + } + } + + /* Weights must add up to 100% */ + if (tc_ets && (tc_ets_weight != 100)) + return -EINVAL; + + if (!pdata->ets) { + pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets), + GFP_KERNEL); + if (!pdata->ets) + return -ENOMEM; + } + + memcpy(pdata->ets, ets, sizeof(*pdata->ets)); + + pdata->hw_if.config_dcb_tc(pdata); + + return 0; +} + +static int xgbe_dcb_ieee_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + /* Set number of supported PFC traffic classes */ + pfc->pfc_cap = pdata->hw_feat.tc_cnt; + + if (pdata->pfc) { + pfc->pfc_en = pdata->pfc->pfc_en; + pfc->mbc = pdata->pfc->mbc; + pfc->delay = pdata->pfc->delay; + } + + return 0; +} + +static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n", + pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + + if (!pdata->pfc) { + pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), + GFP_KERNEL); + if (!pdata->pfc) + return -ENOMEM; + } + + memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc)); + + pdata->hw_if.config_dcb_pfc(pdata); + + return 0; +} + +static u8 xgbe_dcb_getdcbx(struct net_device *netdev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx) +{ + u8 support = xgbe_dcb_getdcbx(netdev); + + DBGPR(" DCBX=%#hhx\n", dcbx); + + if (dcbx & ~support) + return 1; + + if ((dcbx & support) != support) + return 1; + + return 0; +} + +static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = { + /* IEEE 802.1Qaz std */ + .ieee_getets = xgbe_dcb_ieee_getets, + .ieee_setets = xgbe_dcb_ieee_setets, + .ieee_getpfc = xgbe_dcb_ieee_getpfc, + .ieee_setpfc = xgbe_dcb_ieee_setpfc, + + /* DCBX configuration */ + .getdcbx = xgbe_dcb_getdcbx, + .setdcbx = xgbe_dcb_setdcbx, +}; + +const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void) +{ + return &xgbe_dcbnl_ops; +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 2cdf34f6139f..edaca4496264 100644 --- 
a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -407,7 +407,9 @@ static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) { - if (pdata->tx_pause) + struct ieee_pfc *pfc = pdata->pfc; + + if (pdata->tx_pause || (pfc && pfc->pfc_en)) xgbe_enable_tx_flow_control(pdata); else xgbe_disable_tx_flow_control(pdata); @@ -417,7 +419,9 @@ static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) { - if (pdata->rx_pause) + struct ieee_pfc *pfc = pdata->pfc; + + if (pdata->rx_pause || (pfc && pfc->pfc_en)) xgbe_enable_rx_flow_control(pdata); else xgbe_disable_rx_flow_control(pdata); @@ -427,8 +431,13 @@ static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) { + struct ieee_pfc *pfc = pdata->pfc; + xgbe_config_tx_flow_control(pdata); xgbe_config_rx_flow_control(pdata); + + XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, + (pfc && pfc->pfc_en) ? 1 : 0); } static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) @@ -1117,6 +1126,79 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, return 0; } +static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) +{ + struct ieee_ets *ets = pdata->ets; + unsigned int total_weight, min_weight, weight; + unsigned int i; + + if (!ets) + return; + + /* Set Tx to deficit weighted round robin scheduling algorithm (when + * traffic class is using ETS algorithm) + */ + XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); + + /* Set Traffic Class algorithms */ + total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; + min_weight = total_weight / 100; + if (!min_weight) + min_weight = 1; + + for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + DBGPR(" TC%u using SP\n", i); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_SP); + break; + case IEEE_8021QAZ_TSA_ETS: + weight = total_weight * ets->tc_tx_bw[i] / 100; + weight = clamp(weight, min_weight, total_weight); + + DBGPR(" TC%u using DWRR (weight %u)\n", i, weight); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_ETS); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, + weight); + break; + } + } +} + +static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) +{ + struct ieee_pfc *pfc = pdata->pfc; + struct ieee_ets *ets = pdata->ets; + unsigned int mask, reg, reg_val; + unsigned int tc, prio; + + if (!pfc || !ets) + return; + + for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) { + mask = 0; + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + if ((pfc->pfc_en & (1 << prio)) && + (ets->prio_tc[prio] == tc)) + mask |= (1 << prio); + } + mask &= 0xff; + + DBGPR(" TC%u PFC mask=%#x\n", tc, mask); + reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); + reg_val = XGMAC_IOREAD(pdata, reg); + + reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3)); + reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3)); + + XGMAC_IOWRITE(pdata, reg, reg_val); + } + + xgbe_config_flow_control(pdata); +} + static void xgbe_pre_xmit(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -1607,14 +1689,15 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) { unsigned int i; - /* Set Tx to weighted round robin scheduling algorithm (when - * traffic class is using ETS algorithm) - */ + /* Set Tx to 
weighted round robin scheduling algorithm */ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); - /* Set Tx traffic classes to strict priority algorithm */ - for (i = 0; i < XGBE_TC_CNT; i++) - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP); + /* Set Tx traffic classes to use WRR algorithm with equal weights */ + for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_ETS); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); + } /* Set Rx to strict priority algorithm */ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); @@ -1724,18 +1807,75 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) pdata->rx_q_count, ((fifo_size + 1) * 256)); } -static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata) +static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) { - unsigned int i, reg, reg_val; - unsigned int q_count = pdata->rx_q_count; + unsigned int qptc, qptc_extra, queue; + unsigned int prio_queues; + unsigned int ppq, ppq_extra, prio; + unsigned int mask; + unsigned int i, j, reg, reg_val; + + /* Map the MTL Tx Queues to Traffic Classes + * Note: Tx Queues >= Traffic Classes + */ + qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; + qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; + + for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { + for (j = 0; j < qptc; j++) { + DBGPR(" TXq%u mapped to TC%u\n", queue, i); + XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, + Q2TCMAP, i); + pdata->q2tc_map[queue++] = i; + } + + if (i < qptc_extra) { + DBGPR(" TXq%u mapped to TC%u\n", queue, i); + XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, + Q2TCMAP, i); + pdata->q2tc_map[queue++] = i; + } + } + + /* Map the 8 VLAN priority values to available MTL Rx queues */ + prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, + pdata->rx_q_count); + ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; + ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; + + reg = MAC_RQC2R; + reg_val = 0; + for (i = 0, prio = 0; i < prio_queues;) { + mask = 0; + for (j = 0; j < ppq; j++) { + DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + pdata->prio2q_map[prio++] = i; + } + + if (i < ppq_extra) { + DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + pdata->prio2q_map[prio++] = i; + } + + reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); + + if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) + continue; + + XGMAC_IOWRITE(pdata, reg, reg_val); + reg += MAC_RQC2_INC; + reg_val = 0; + } /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ reg = MTL_RQDCM0R; reg_val = 0; - for (i = 0; i < q_count;) { + for (i = 0; i < pdata->rx_q_count;) { reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); - if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count)) + if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) continue; XGMAC_IOWRITE(pdata, reg, reg_val); @@ -2321,9 +2461,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata) * Initialize MTL related features */ xgbe_config_mtl_mode(pdata); - xgbe_config_rx_queue_mapping(pdata); - /*TODO: Program the priorities mapped to the Selected Traffic Classes - in MTL_TC_Prty_Map0-3 registers */ + xgbe_config_queue_mapping(pdata); xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); xgbe_config_tx_threshold(pdata, pdata->tx_threshold); @@ -2331,15 +2469,13 @@ static int xgbe_init(struct xgbe_prv_data *pdata) xgbe_config_tx_fifo_size(pdata); xgbe_config_rx_fifo_size(pdata); 
xgbe_config_flow_control_threshold(pdata); - /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */ /*TODO: Error Packet and undersized good Packet forwarding enable (FEP and FUP) */ + xgbe_config_dcb_tc(pdata); + xgbe_config_dcb_pfc(pdata); xgbe_enable_mtl_interrupts(pdata); - /* Transmit Class Weight */ - XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10); - /* * Initialize MAC related features */ @@ -2448,5 +2584,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->get_tstamp_time = xgbe_get_tstamp_time; hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; + /* For Data Center Bridging config */ + hw_if->config_dcb_tc = xgbe_config_dcb_tc; + hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 47a1e25b5609..3bf3c0194ad3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -387,6 +387,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, HASHTBLSZ); hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, @@ -1312,6 +1313,33 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ +static int xgbe_setup_tc(struct net_device *netdev, u8 tc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int offset, queue; + u8 i; + + if (tc && (tc != pdata->hw_feat.tc_cnt)) + return -EINVAL; + + if (tc) { + netdev_set_num_tc(netdev, tc); + for (i = 0, queue = 0, offset = 0; i < tc; i++) { + while ((queue < pdata->tx_q_count) && + (pdata->q2tc_map[queue] == i)) + queue++; + + DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1); + netdev_set_tc_queue(netdev, i, queue - offset, offset); + offset = queue; + } + } else { + netdev_reset_tc(netdev); + } + + return 0; +} + static int xgbe_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1360,6 +1388,7 @@ static const struct net_device_ops xgbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xgbe_poll_controller, #endif + .ndo_setup_tc = xgbe_setup_tc, .ndo_set_features = xgbe_set_features, }; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 04e6c72eb3c8..ec977d36063f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -400,9 +400,12 @@ static int xgbe_probe(struct platform_device *pdev) if (ret) goto err_bus_id; - /* Set network and ethtool operations */ + /* Set device operations */ netdev->netdev_ops = xgbe_get_netdev_ops(); netdev->ethtool_ops = xgbe_get_ethtool_ops(); +#ifdef CONFIG_AMD_XGBE_DCB + netdev->dcbnl_ops = xgbe_get_dcbnl_ops(); +#endif /* Set device features */ netdev->hw_features = NETIF_F_SG | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index f011d88d2211..07bf70a82908 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -126,6 +126,7 @@ #include #include #include +#include #define XGBE_DRV_NAME "amd-xgbe" @@ -144,6 +145,7 @@ #define XGBE_RX_BUF_ALIGN 64 #define XGBE_MAX_DMA_CHANNELS 16 +#define XGBE_MAX_QUEUES 16 /* DMA 
cache settings - Outer sharable, write-back, write-allocate */ #define XGBE_DMA_OS_AXDOMAIN 0x2 @@ -184,7 +186,7 @@ #define XGBE_FIFO_SIZE_B(x) (x) #define XGBE_FIFO_SIZE_KB(x) (x * 1024) -#define XGBE_TC_CNT 2 +#define XGBE_TC_MIN_QUANTUM 10 /* Helper macro for descriptor handling * Always use XGBE_GET_DESC_DATA to access the descriptor data @@ -504,6 +506,10 @@ struct xgbe_hw_if { unsigned int nsec); u64 (*get_tstamp_time)(struct xgbe_prv_data *); u64 (*get_tx_tstamp)(struct xgbe_prv_data *); + + /* For Data Center Bridging config */ + void (*config_dcb_tc)(struct xgbe_prv_data *); + void (*config_dcb_pfc)(struct xgbe_prv_data *); }; struct xgbe_desc_if { @@ -545,6 +551,7 @@ struct xgbe_hw_features { unsigned int tso; /* TCP Segmentation Offload */ unsigned int dma_debug; /* DMA Debug Registers */ unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ unsigned int hash_table_size; /* Hash Table Size */ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ @@ -663,6 +670,12 @@ struct xgbe_prv_data { struct sk_buff *tx_tstamp_skb; u64 tx_tstamp; + /* DCB support */ + struct ieee_ets *ets; + struct ieee_pfc *pfc; + unsigned int q2tc_map[XGBE_MAX_QUEUES]; + unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; + /* Hardware features of the device */ struct xgbe_hw_features hw_feat; @@ -688,6 +701,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *); void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); struct net_device_ops *xgbe_get_netdev_ops(void); struct ethtool_ops *xgbe_get_ethtool_ops(void); +#ifdef CONFIG_AMD_XGBE_DCB +const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void); +#endif int xgbe_mdio_register(struct xgbe_prv_data *); void xgbe_mdio_unregister(struct xgbe_prv_data *); -- cgit v1.2.1 From 8c43a2cc75b3bf4f89ea439f4439a2a61f22c961 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Fri, 1 Aug 2014 11:56:29 -0500 Subject: amd-xgbe: Remove unnecessary spinlocks Remove the spinlocks around the ethtool get and set settings functions and within the link adjustment callback routine. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 20 +++++--------------- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 5 ----- 2 files changed, 5 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6005b6021f78..a076aca138a1 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -290,13 +290,9 @@ static int xgbe_get_settings(struct net_device *netdev, if (!pdata->phydev) return -ENODEV; - spin_lock_irq(&pdata->lock); - ret = phy_ethtool_gset(pdata->phydev, cmd); cmd->transceiver = XCVR_EXTERNAL; - spin_unlock_irq(&pdata->lock); - DBGPR("<--xgbe_get_settings\n"); return ret; @@ -315,17 +311,14 @@ static int xgbe_set_settings(struct net_device *netdev, if (!pdata->phydev) return -ENODEV; - spin_lock_irq(&pdata->lock); - speed = ethtool_cmd_speed(cmd); - ret = -EINVAL; if (cmd->phy_address != phydev->addr) - goto unlock; + return -EINVAL; if ((cmd->autoneg != AUTONEG_ENABLE) && (cmd->autoneg != AUTONEG_DISABLE)) - goto unlock; + return -EINVAL; if (cmd->autoneg == AUTONEG_DISABLE) { switch (speed) { @@ -334,16 +327,16 @@ static int xgbe_set_settings(struct net_device *netdev, case SPEED_1000: break; default: - goto unlock; + return -EINVAL; } if (cmd->duplex != DUPLEX_FULL) - goto unlock; + return -EINVAL; } cmd->advertising &= phydev->supported; if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) - goto unlock; + return -EINVAL; ret = 0; phydev->autoneg = cmd->autoneg; @@ -359,9 +352,6 @@ static int xgbe_set_settings(struct net_device *netdev, if (netif_running(netdev)) ret = phy_start_aneg(phydev); -unlock: - spin_unlock_irq(&pdata->lock); - DBGPR("<--xgbe_set_settings\n"); return ret; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 225f22d5fe0a..eecd360430a4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -163,7 +163,6 @@ static void xgbe_adjust_link(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct phy_device *phydev = pdata->phydev; - unsigned long flags; int new_state = 0; if (phydev == NULL) @@ -172,8 +171,6 @@ static void xgbe_adjust_link(struct net_device *netdev) DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n", phydev->addr, phydev->link, pdata->phy_link); - spin_lock_irqsave(&pdata->lock, flags); - if (phydev->link) { /* Flow control support */ if (pdata->pause_autoneg) { @@ -229,8 +226,6 @@ static void xgbe_adjust_link(struct net_device *netdev) if (new_state) phy_print_status(phydev); - spin_unlock_irqrestore(&pdata->lock, flags); - DBGPR_MDIO("<--xgbe_adjust_link\n"); } -- cgit v1.2.1 From f3d0e78d2e733176c2b0d23e7a7d871c7a33c2d6 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 5 Aug 2014 13:30:38 -0500 Subject: amd-xgbe: Use dma_set_mask_and_coherent to set DMA mask Use the dma_set_mask_and_coherent function to set the DMA mask rather than setting the DMA mask fields directly. This was originally done to work around a bug in the arm64 DMA support when RAM started above the 4GB boundary which has since been fixed. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index ec977d36063f..8aa6a9353f7b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -303,8 +303,11 @@ static int xgbe_probe(struct platform_device *pdev) /* Set the DMA mask */ if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask; - *(dev->dma_mask) = DMA_BIT_MASK(40); - dev->coherent_dma_mask = DMA_BIT_MASK(40); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed\n"); + goto err_io; + } if (of_property_read_bool(dev->of_node, "dma-coherent")) { pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; -- cgit v1.2.1 From 88131a812b16b45cf999e577ad8d89b41ad606e3 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 5 Aug 2014 13:30:44 -0500 Subject: amd-xgbe: Perform phy connect/disconnect at dev open/stop A change added to the mdiobus/phy api added a module_get/module_put during phy connect/disconnect processing. Currently, the driver performs a phy connect during module probe and a phy disconnect during module remove. With the addition of the module_get during phy connect the amd-xgbe module use count is incremented and can no longer be unloaded. Move the phy connect/disconnect from the driver probe/remove functions to the net_device_ops ndo_open/ndo_stop functions. This allows the module use count to be decremented when the device(s) are brought down and allows the module to be unloaded. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 122 +++++++++++++++++++++++++++++- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 98 ------------------------ 2 files changed, 121 insertions(+), 99 deletions(-) (limited to 'drivers/net/ethernet/amd') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3bf3c0194ad3..1f5487f4888c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -122,6 +122,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -521,6 +522,114 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) DBGPR("<--xgbe_free_rx_skbuff\n"); } +static void xgbe_adjust_link(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct phy_device *phydev = pdata->phydev; + int new_state = 0; + + if (phydev == NULL) + return; + + if (phydev->link) { + /* Flow control support */ + if (pdata->pause_autoneg) { + if (phydev->pause || phydev->asym_pause) { + pdata->tx_pause = 1; + pdata->rx_pause = 1; + } else { + pdata->tx_pause = 0; + pdata->rx_pause = 0; + } + } + + if (pdata->tx_pause != pdata->phy_tx_pause) { + hw_if->config_tx_flow_control(pdata); + pdata->phy_tx_pause = pdata->tx_pause; + } + + if (pdata->rx_pause != pdata->phy_rx_pause) { + hw_if->config_rx_flow_control(pdata); + pdata->phy_rx_pause = pdata->rx_pause; + } + + /* Speed support */ + if (phydev->speed != pdata->phy_speed) { + new_state = 1; + + switch (phydev->speed) { + case SPEED_10000: + hw_if->set_xgmii_speed(pdata); + break; + + case SPEED_2500: + hw_if->set_gmii_2500_speed(pdata); + break; + + case SPEED_1000: + hw_if->set_gmii_speed(pdata); + break; + } + pdata->phy_speed = phydev->speed; + } + 
+ if (phydev->link != pdata->phy_link) { + new_state = 1; + pdata->phy_link = 1; + } + } else if (pdata->phy_link) { + new_state = 1; + pdata->phy_link = 0; + pdata->phy_speed = SPEED_UNKNOWN; + } + + if (new_state) + phy_print_status(phydev); +} + +static int xgbe_phy_init(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct phy_device *phydev = pdata->phydev; + int ret; + + pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + pdata->phy_tx_pause = pdata->tx_pause; + pdata->phy_rx_pause = pdata->rx_pause; + + ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link, + pdata->phy_mode); + if (ret) { + netdev_err(netdev, "phy_connect_direct failed\n"); + return ret; + } + + if (!phydev->drv || (phydev->drv->phy_id == 0)) { + netdev_err(netdev, "phy_id not valid\n"); + ret = -ENODEV; + goto err_phy_connect; + } + DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n", + dev_name(&phydev->dev), phydev->link); + + return 0; + +err_phy_connect: + phy_disconnect(phydev); + + return ret; +} + +static void xgbe_phy_exit(struct xgbe_prv_data *pdata) +{ + if (!pdata->phydev) + return; + + phy_disconnect(pdata->phydev); +} + int xgbe_powerdown(struct net_device *netdev, unsigned int caller) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -986,11 +1095,16 @@ static int xgbe_open(struct net_device *netdev) DBGPR("-->xgbe_open\n"); + /* Initialize the phy */ + ret = xgbe_phy_init(pdata); + if (ret) + return ret; + /* Enable the clocks */ ret = clk_prepare_enable(pdata->sysclk); if (ret) { netdev_alert(netdev, "dma clk_prepare_enable failed\n"); - return ret; + goto err_phy_init; } ret = clk_prepare_enable(pdata->ptpclk); @@ -1047,6 +1161,9 @@ err_ptpclk: err_sysclk: clk_disable_unprepare(pdata->sysclk); +err_phy_init: + xgbe_phy_exit(pdata); + return ret; } @@ -1077,6 +1194,9 @@ static int xgbe_close(struct net_device *netdev) clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk); + /* Release the phy */ + xgbe_phy_exit(pdata); + DBGPR("<--xgbe_close\n"); return 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index eecd360430a4..6d2221e023f4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -116,7 +116,6 @@ #include #include -#include #include #include #include @@ -158,77 +157,6 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg, return 0; } -static void xgbe_adjust_link(struct net_device *netdev) -{ - struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct phy_device *phydev = pdata->phydev; - int new_state = 0; - - if (phydev == NULL) - return; - - DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n", - phydev->addr, phydev->link, pdata->phy_link); - - if (phydev->link) { - /* Flow control support */ - if (pdata->pause_autoneg) { - if (phydev->pause || phydev->asym_pause) { - pdata->tx_pause = 1; - pdata->rx_pause = 1; - } else { - pdata->tx_pause = 0; - pdata->rx_pause = 0; - } - } - - if (pdata->tx_pause != pdata->phy_tx_pause) { - hw_if->config_tx_flow_control(pdata); - pdata->phy_tx_pause = pdata->tx_pause; - } - - if (pdata->rx_pause != pdata->phy_rx_pause) { - hw_if->config_rx_flow_control(pdata); - pdata->phy_rx_pause = pdata->rx_pause; - } - - /* Speed support */ - if (phydev->speed != pdata->phy_speed) { - new_state = 1; - - switch (phydev->speed) { - case SPEED_10000: - hw_if->set_xgmii_speed(pdata); - break; - - case 
SPEED_2500: - hw_if->set_gmii_2500_speed(pdata); - break; - - case SPEED_1000: - hw_if->set_gmii_speed(pdata); - break; - } - pdata->phy_speed = phydev->speed; - } - - if (phydev->link != pdata->phy_link) { - new_state = 1; - pdata->phy_link = 1; - } - } else if (pdata->phy_link) { - new_state = 1; - pdata->phy_link = 0; - pdata->phy_speed = SPEED_UNKNOWN; - } - - if (new_state) - phy_print_status(phydev); - - DBGPR_MDIO("<--xgbe_adjust_link\n"); -} - void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) { struct device *dev = pdata->dev; @@ -278,7 +206,6 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) int xgbe_mdio_register(struct xgbe_prv_data *pdata) { - struct net_device *netdev = pdata->netdev; struct device_node *phy_node; struct mii_bus *mii; struct phy_device *phydev; @@ -293,7 +220,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata) return -EINVAL; } - /* Register with the MDIO bus */ mii = mdiobus_alloc(); if (mii == NULL) { dev_err(pdata->dev, "mdiobus_alloc failed\n"); @@ -348,26 +274,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata) pdata->mii = mii; pdata->mdio_mmd = MDIO_MMD_PCS; - pdata->phy_link = -1; - pdata->phy_speed = SPEED_UNKNOWN; - pdata->phy_tx_pause = pdata->tx_pause; - pdata->phy_rx_pause = pdata->rx_pause; - - ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link, - pdata->phy_mode); - if (ret) { - netdev_err(netdev, "phy_connect_direct failed\n"); - goto err_phy_device; - } - - if (!phydev->drv || (phydev->drv->phy_id == 0)) { - netdev_err(netdev, "phy_id not valid\n"); - ret = -ENODEV; - goto err_phy_connect; - } - DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n", - dev_name(&phydev->dev), phydev->link); - phydev->autoneg = pdata->default_autoneg; if (phydev->autoneg == AUTONEG_DISABLE) { phydev->speed = pdata->default_speed; @@ -386,9 +292,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata) return 0; -err_phy_connect: - phy_disconnect(phydev); - err_phy_device: phy_device_free(phydev); @@ -408,7 +311,6 @@ void xgbe_mdio_unregister(struct xgbe_prv_data *pdata) { DBGPR("-->xgbe_mdio_unregister\n"); - phy_disconnect(pdata->phydev); pdata->phydev = NULL; module_put(pdata->phy_module); -- cgit v1.2.1
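
The approach taken by the final patch above generalizes to any driver whose PHY attachment pins the PHY driver module: hold the phylib connection only while the interface is up, so the module reference taken by phy_connect_direct() is released again from ndo_stop and the MAC driver module can be unloaded. Below is a minimal sketch of that shape only; the foo_* names and the private-data layout are hypothetical placeholders for illustration and are not part of the amd-xgbe driver.

#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {			/* hypothetical private data */
	struct phy_device *phydev;
	phy_interface_t phy_mode;
};

static void foo_adjust_link(struct net_device *netdev)
{
	/* react to link/speed/pause changes reported by phylib */
}

static int foo_open(struct net_device *netdev)
{
	struct foo_priv *priv = netdev_priv(netdev);
	int ret;

	/* Connecting here takes a reference on the PHY driver module */
	ret = phy_connect_direct(netdev, priv->phydev, foo_adjust_link,
				 priv->phy_mode);
	if (ret)
		return ret;

	phy_start(priv->phydev);
	netif_start_queue(netdev);

	return 0;
}

static int foo_stop(struct net_device *netdev)
{
	struct foo_priv *priv = netdev_priv(netdev);

	netif_stop_queue(netdev);
	phy_stop(priv->phydev);

	/* Drops the PHY driver module reference, allowing module unload */
	phy_disconnect(priv->phydev);

	return 0;
}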