Diffstat (limited to 'drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c')
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c  453
1 file changed, 333 insertions(+), 120 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ccdd47f3b8fb..72133cbe55d4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -292,6 +292,26 @@ static void mvpp2_txq_inc_put(struct mvpp2_port *port,
txq_pcpu->txq_put_index = 0;
}
+/* Get the maximum number of RX queues */
+static int mvpp2_get_nrxqs(struct mvpp2 *priv)
+{
+ unsigned int nrxqs;
+
+ if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return 1;
+
+ /* According to the PPv2.2 datasheet and our experiments on
+ * PPv2.1, RX queues have an allocation granularity of 4 (when
+ * more than a single one on PPv2.2).
+ * Round up to nearest multiple of 4.
+ */
+ nrxqs = (num_possible_cpus() + 3) & ~0x3;
+ if (nrxqs > MVPP2_PORT_MAX_RXQ)
+ nrxqs = MVPP2_PORT_MAX_RXQ;
+
+ return nrxqs;
+}
+
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
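
The rounding in mvpp2_get_nrxqs() is the classic add-then-mask idiom: adding 3 and clearing the two low bits rounds any CPU count up to the next multiple of 4, matching the hardware's RXQ allocation granularity. A minimal standalone sketch (the helper name is illustrative, not part of the driver):

/* Illustrative only: (n + 3) & ~0x3 rounds n up to the next multiple of 4. */
static unsigned int round_up_to_4(unsigned int n)
{
	return (n + 3) & ~0x3;	/* 1 -> 4, 4 -> 4, 6 -> 8, 9 -> 12 */
}
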
@@ -323,8 +343,7 @@ static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
/* Buffer Manager configuration routines */
/* Create pool */
-static int mvpp2_bm_pool_create(struct platform_device *pdev,
- struct mvpp2 *priv,
+static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool, int size)
{
u32 val;
@@ -343,7 +362,7 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
else
bm_pool->size_bytes = 2 * sizeof(u64) * size;
- bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+ bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
&bm_pool->dma_addr,
GFP_KERNEL);
if (!bm_pool->virt_addr)
@@ -351,9 +370,9 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ dma_free_coherent(dev, bm_pool->size_bytes,
bm_pool->virt_addr, bm_pool->dma_addr);
- dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+ dev_err(dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
@@ -468,15 +487,14 @@ static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_p
}
/* Cleanup pool */
-static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
- struct mvpp2 *priv,
+static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
{
int buf_num;
u32 val;
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
- mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+ mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
/* Check buffer counters after free */
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
@@ -490,24 +508,26 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
- dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ dma_free_coherent(dev, bm_pool->size_bytes,
bm_pool->virt_addr,
bm_pool->dma_addr);
return 0;
}
-static int mvpp2_bm_pools_init(struct platform_device *pdev,
- struct mvpp2 *priv)
+static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
- int i, err, size;
+ int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
struct mvpp2_bm_pool *bm_pool;
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
/* Create all pools with maximum size */
size = MVPP2_BM_POOL_SIZE_MAX;
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ for (i = 0; i < poolnum; i++) {
bm_pool = &priv->bm_pools[i];
bm_pool->id = i;
- err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+ err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
if (err)
goto err_unroll_pools;
mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
@@ -515,17 +535,23 @@ static int mvpp2_bm_pools_init(struct platform_device *pdev,
return 0;
err_unroll_pools:
- dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+ dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
for (i = i - 1; i >= 0; i--)
- mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+ mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
return err;
}
-static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
- int i, err;
+ int i, err, poolnum = MVPP2_BM_POOLS_NUM;
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ dev_info(dev, "using %d %s buffers\n", poolnum,
+ priv->percpu_pools ? "per-cpu" : "shared");
+
+ for (i = 0; i < poolnum; i++) {
/* Mask BM all interrupts */
mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
/* Clear BM cause register */
@@ -533,12 +559,12 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
}
/* Allocate and initialize BM pools */
- priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+ priv->bm_pools = devm_kcalloc(dev, poolnum,
sizeof(*priv->bm_pools), GFP_KERNEL);
if (!priv->bm_pools)
return -ENOMEM;
- err = mvpp2_bm_pools_init(pdev, priv);
+ err = mvpp2_bm_pools_init(dev, priv);
if (err < 0)
return err;
return 0;
@@ -679,6 +705,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
phys_addr_t phys_addr;
void *buf;
+ if (port->priv->percpu_pools &&
+ bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ netdev_err(port->dev,
+ "attempted to use jumbo frames with per-cpu pools");
+ return 0;
+ }
+
buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
@@ -722,7 +755,64 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
- if (pool >= MVPP2_BM_POOLS_NUM) {
+ if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
+ (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
+ netdev_err(port->dev, "Invalid pool %d\n", pool);
+ return NULL;
+ }
+
+ /* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't been used yet
+ */
+ if (new_pool->pkt_size == 0) {
+ int pkts_num;
+
+ /* Set default buffer number or free all the buffers in case
+ * the pool is not empty
+ */
+ pkts_num = new_pool->buf_num;
+ if (pkts_num == 0) {
+ if (port->priv->percpu_pools) {
+ if (pool < port->nrxqs)
+ pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
+ else
+ pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
+ } else {
+ pkts_num = mvpp2_pools[pool].buf_num;
+ }
+ } else {
+ mvpp2_bm_bufs_free(port->dev->dev.parent,
+ port->priv, new_pool, pkts_num);
+ }
+
+ new_pool->pkt_size = pkt_size;
+ new_pool->frag_size =
+ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
+
+ /* Allocate buffers for this pool */
+ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+ if (num != pkts_num) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ new_pool->id, num, pkts_num);
+ return NULL;
+ }
+ }
+
+ mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+ MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+ return new_pool;
+}
+
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
+ unsigned int pool, int pkt_size)
+{
+ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+ int num;
+
+ if (pool > port->nrxqs * 2) {
netdev_err(port->dev, "Invalid pool %d\n", pool);
return NULL;
}
@@ -738,7 +828,7 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
*/
pkts_num = new_pool->buf_num;
if (pkts_num == 0)
- pkts_num = mvpp2_pools[pool].buf_num;
+ pkts_num = mvpp2_pools[type].buf_num;
else
mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool, pkts_num);
@@ -763,11 +853,11 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
return new_pool;
}
-/* Initialize pools for swf */
-static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+/* Initialize pools for swf, shared buffers variant */
+static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
- int rxq;
enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+ int rxq;
/* If port pkt_size is higher than 1518B:
* HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
@@ -811,6 +901,47 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
return 0;
}
+/* Initialize pools for swf, percpu buffers variant */
+static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
+{
+ struct mvpp2_bm_pool *p;
+ int i;
+
+ for (i = 0; i < port->nrxqs; i++) {
+ p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
+ if (!p)
+ return -ENOMEM;
+
+ port->priv->bm_pools[i].port_map |= BIT(port->id);
+ mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
+ }
+
+ for (i = 0; i < port->nrxqs; i++) {
+ p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size);
+ if (!p)
+ return -ENOMEM;
+
+ port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
+ mvpp2_rxq_long_pool_set(port, i,
+ port->priv->bm_pools[i + port->nrxqs].id);
+ }
+
+ port->pool_long = NULL;
+ port->pool_short = NULL;
+
+ return 0;
+}
+
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+ if (port->priv->percpu_pools)
+ return mvpp2_swf_bm_pool_init_percpu(port);
+ else
+ return mvpp2_swf_bm_pool_init_shared(port);
+}
+
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
enum mvpp2_bm_pool_log_num new_long_pool)
{
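
With per-cpu pools, the pool array is laid out as nrxqs short pools followed by nrxqs long pools, so RX queue i draws short buffers from pool i and long buffers from pool i + nrxqs, as the two loops above show. A hypothetical helper (not in the driver) expressing that mapping:

/* Hypothetical index helper: short pools occupy [0, nrxqs),
 * long pools occupy [nrxqs, 2 * nrxqs).
 */
static inline int percpu_pool_index(int rxq, int nrxqs, bool is_long)
{
	return is_long ? rxq + nrxqs : rxq;
}
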
@@ -837,6 +968,9 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
enum mvpp2_bm_pool_log_num new_long_pool;
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ if (port->priv->percpu_pools)
+ goto out_set;
+
/* If port MTU is higher than 1518B:
* HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
* else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
@@ -866,6 +1000,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
mvpp2_set_hw_csum(port, new_long_pool);
}
+out_set:
dev->mtu = mtu;
dev->wanted_features = dev->features;
@@ -979,7 +1114,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
- return interface == PHY_INTERFACE_MODE_10GKR ||
+ return interface == PHY_INTERFACE_MODE_10GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1065,7 +1200,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
if (port->gop_id != 0)
goto invalid_conf;
mvpp22_gop_init_10gkr(port);
@@ -1514,7 +1649,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
MAC_CLK_RESET_SD_TX;
@@ -2651,31 +2786,21 @@ handled:
return IRQ_HANDLED;
}
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
-{
- ktime_t interval;
-
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
- hrtimer_start(&port_pcpu->tx_done_timer, interval,
- HRTIMER_MODE_REL_PINNED);
- }
-}
-
-static void mvpp2_tx_proc_cb(unsigned long data)
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
- struct net_device *dev = (struct net_device *)data;
- struct mvpp2_port *port = netdev_priv(dev);
+ struct net_device *dev;
+ struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
- port_pcpu = per_cpu_ptr(port->pcpu,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
+ dev = port_pcpu->dev;
if (!netif_running(dev))
- return;
+ return HRTIMER_NORESTART;
+
port_pcpu->timer_scheduled = false;
+ port = netdev_priv(dev);
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
@@ -2683,18 +2808,13 @@ static void mvpp2_tx_proc_cb(unsigned long data)
mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
- if (tx_todo)
- mvpp2_timer_set(port_pcpu);
-}
-
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
-{
- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
- struct mvpp2_port_pcpu,
- tx_done_timer);
-
- tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ if (tx_todo && !port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_forward_now(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ return HRTIMER_RESTART;
+ }
return HRTIMER_NORESTART;
}
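
With the tasklet removed, the hrtimer callback does the TX-done processing itself and rearms with hrtimer_forward_now(), returning HRTIMER_RESTART only while work remains. A minimal sketch of that self-rearming pattern, using illustrative names (example_pcpu, work_pending) rather than the driver's:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct example_pcpu {
	struct hrtimer timer;
	bool work_pending;
};

static enum hrtimer_restart example_timer_cb(struct hrtimer *t)
{
	/* Recover the per-CPU context that embeds the timer. */
	struct example_pcpu *pcpu = container_of(t, struct example_pcpu, timer);

	if (!pcpu->work_pending)
		return HRTIMER_NORESTART;

	/* Push the expiry forward from now and keep the timer armed. */
	hrtimer_forward_now(t, ms_to_ktime(1));
	return HRTIMER_RESTART;
}
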
@@ -2743,7 +2863,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int pool)
{
@@ -2751,7 +2871,6 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
phys_addr_t phys_addr;
void *buf;
- /* No recycle or too many buffers are in use, so allocate a new skb */
buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
GFP_ATOMIC);
if (!buf)
@@ -2837,14 +2956,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
* by the hardware, and the information about the buffer is
* comprised by the RX descriptor.
*/
- if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-err_drop_frame:
- dev->stats.rx_errors++;
- mvpp2_rx_error(port, rx_desc);
- /* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
- continue;
- }
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ rx_bytes + MVPP2_MH_SIZE,
+ DMA_FROM_DEVICE);
+ prefetch(data);
if (bm_pool->frag_size > PAGE_SIZE)
frag_size = 0;
@@ -2863,8 +2981,9 @@ err_drop_frame:
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2875,6 +2994,13 @@ err_drop_frame:
mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(napi, skb);
+ continue;
+
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
if (rcvd_pkts) {
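
The receive path now syncs only the bytes the NIC actually wrote before touching the data, which lets the later unmap skip the already-performed CPU sync. A condensed sketch of the pairing, assuming dev, dma_addr, len and buf_size are in scope:

/* Sync just the received bytes for the CPU ... */
dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
/* ... then unmap without repeating the CPU sync done above. */
dma_unmap_single_attrs(dev, dma_addr, buf_size, DMA_FROM_DEVICE,
		       DMA_ATTR_SKIP_CPU_SYNC);
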
@@ -2923,14 +3049,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -3181,7 +3308,12 @@ out:
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
- mvpp2_timer_set(port_pcpu);
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ }
}
if (test_bit(thread, &port->priv->lock_map))
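
Starting the timer in _SOFT mode only works because the probe path (later in this patch) initialises it the same way; the soft/hard mode must match between hrtimer_init() and hrtimer_start(). Condensed from the two hunks of this diff:

/* At probe time: */
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
	     HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;

/* On the TX path, when packets are still pending: */
hrtimer_start(&port_pcpu->tx_done_timer, MVPP2_TXDONE_HRTIMER_PERIOD_NS,
	      HRTIMER_MODE_REL_PINNED_SOFT);
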
@@ -3548,7 +3680,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+ if (priv->hw_version == MVPP22 && port->link_irq) {
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
dev->name, port);
if (err) {
@@ -3618,7 +3750,6 @@ static int mvpp2_stop(struct net_device *dev)
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
- tasklet_kill(&port_pcpu->tx_done_tasklet);
}
}
mvpp2_cleanup_rxqs(port);
@@ -3709,10 +3840,48 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
return err;
}
+/* Shut down all the ports, reconfigure the pools as percpu or shared,
+ * then bring all ports back up.
+ */
+static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
+{
+ int numbufs = MVPP2_BM_POOLS_NUM, i;
+ struct mvpp2_port *port = NULL;
+ bool status[MVPP2_MAX_PORTS];
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ status[i] = netif_running(port->dev);
+ if (status[i])
+ mvpp2_stop(port->dev);
+ }
+
+ /* nrxqs is the same for all ports */
+ if (priv->percpu_pools)
+ numbufs = port->nrxqs * 2;
+
+ for (i = 0; i < numbufs; i++)
+ mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
+
+ devm_kfree(port->dev->dev.parent, priv->bm_pools);
+ priv->percpu_pools = percpu;
+ mvpp2_bm_init(port->dev->dev.parent, priv);
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ mvpp2_swf_bm_pool_init(port);
+ if (status[i])
+ mvpp2_open(port->dev);
+ }
+
+ return 0;
+}
+
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
bool running = netif_running(dev);
+ struct mvpp2 *priv = port->priv;
int err;
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3721,6 +3890,31 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
+ if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
+ if (priv->percpu_pools) {
+ netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
+ mvpp2_bm_switch_buffers(priv, false);
+ }
+ } else {
+ bool jumbo = false;
+ int i;
+
+ for (i = 0; i < priv->port_count; i++)
+ if (priv->port_list[i] != port &&
+ MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
+ MVPP2_BM_LONG_PKT_SIZE) {
+ jumbo = true;
+ break;
+ }
+
+ /* No port is using jumbo frames */
+ if (!jumbo) {
+ dev_info(port->dev->dev.parent,
+ "all ports have a low MTU, switching to per-cpu buffers");
+ mvpp2_bm_switch_buffers(priv, true);
+ }
+ }
+
if (running)
mvpp2_stop_dev(port);
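
The policy in the two branches above can be read as a single predicate: per-cpu pools remain viable only while every port's receive size fits the long-pool packet size. A hypothetical distillation (not a function in the driver), using the driver's own macros and fields:

/* Hypothetical predicate: may the system use per-cpu pools? */
static bool percpu_pools_ok(struct mvpp2 *priv)
{
	int i;

	for (i = 0; i < priv->port_count; i++)
		if (MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
		    MVPP2_BM_LONG_PKT_SIZE)
			return false;	/* this port needs jumbo buffers */
	return true;
}
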
@@ -4564,7 +4758,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
/* Invalid combinations */
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
if (port->gop_id != 0)
goto empty_set;
@@ -4586,7 +4780,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
if (port->gop_id == 0) {
@@ -4598,6 +4792,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10000baseER_Full);
phylink_set(mask, 10000baseKR_Full);
}
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4608,13 +4804,23 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
+ if (port->comphy ||
+ state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (port->comphy ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
break;
default:
goto empty_set;
@@ -4623,14 +4829,16 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ phylink_helper_basex_speed(state);
return;
empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_link_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
+static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
{
u32 val;
@@ -4649,8 +4857,8 @@ static void mvpp22_xlg_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_RX;
}
-static void mvpp2_gmac_link_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
+static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
{
u32 val;
@@ -4683,8 +4891,8 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_TX;
}
-static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct mvpp2_port *port = container_of(config, struct mvpp2_port,
phylink_config);
@@ -4694,13 +4902,12 @@ static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_link_state(port, state);
- return 1;
+ mvpp22_xlg_pcs_get_state(port, state);
+ return;
}
}
- mvpp2_gmac_link_state(port, state);
- return 1;
+ mvpp2_gmac_pcs_get_state(port, state);
}
static void mvpp2_mac_an_restart(struct phylink_config *config)
@@ -4992,7 +5199,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_link_state = mvpp2_phylink_mac_link_state,
+ .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
.mac_an_restart = mvpp2_mac_an_restart,
.mac_config = mvpp2_mac_config,
.mac_link_up = mvpp2_mac_link_up,
@@ -5010,7 +5217,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node = to_of_node(port_fwnode);
netdev_features_t features;
struct net_device *dev;
- struct resource *res;
struct phylink *phylink;
char *mac_from = "";
unsigned int ntxqs, nrxqs, thread;
@@ -5028,18 +5234,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
ntxqs = MVPP2_MAX_TXQ;
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) {
- nrxqs = 1;
- } else {
- /* According to the PPv2.2 datasheet and our experiments on
- * PPv2.1, RX queues have an allocation granularity of 4 (when
- * more than a single one on PPv2.2).
- * Round up to nearest multiple of 4.
- */
- nrxqs = (num_possible_cpus() + 3) & ~0x3;
- if (nrxqs > MVPP2_PORT_MAX_RXQ)
- nrxqs = MVPP2_PORT_MAX_RXQ;
- }
+ nrxqs = mvpp2_get_nrxqs(priv);
dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
if (!dev)
@@ -5052,6 +5247,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
}
+ /*
+ * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
+ * Existing usage of 10GBASE-KR is not correct; no backplane
+ * negotiation is done, and this driver does not actually support
+ * 10GBASE-KR.
+ */
+ if (phy_mode == PHY_INTERFACE_MODE_10GKR)
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
+
if (port_node) {
comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
if (IS_ERR(comphy)) {
@@ -5114,8 +5318,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->comphy = comphy;
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
+ port->base = devm_platform_ioremap_resource(pdev, 2 + id);
if (IS_ERR(port->base)) {
err = PTR_ERR(port->base);
goto err_free_irq;
@@ -5184,13 +5387,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
port_pcpu->timer_scheduled = false;
-
- tasklet_init(&port_pcpu->tx_done_tasklet,
- mvpp2_tx_proc_cb,
- (unsigned long)dev);
+ port_pcpu->dev = dev;
}
}
@@ -5205,7 +5405,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->features |= NETIF_F_NTUPLE;
}
- mvpp2_set_hw_csum(port, port->pool_long->id);
+ if (!port->priv->percpu_pools)
+ mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
@@ -5233,6 +5434,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink = NULL;
}
+ /* Cycle the comphy to power it down, saving 270mW per port -
+ * don't worry about an error powering it up. When the comphy
+ * driver does this, we can remove this code.
+ */
+ if (port->comphy) {
+ err = mvpp22_comphy_init(port);
+ if (err == 0)
+ phy_power_off(port->comphy);
+ }
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
@@ -5497,7 +5708,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
/* Buffer Manager initialization */
- err = mvpp2_bm_init(pdev, priv);
+ err = mvpp2_bm_init(&pdev->dev, priv);
if (err < 0)
return err;
@@ -5544,14 +5755,12 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->hw_version == MVPP21)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->lms_base))
return PTR_ERR(priv->lms_base);
} else {
@@ -5585,6 +5794,10 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
+ if (priv->hw_version == MVPP22 &&
+ mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
+ priv->percpu_pools = 1;
+
mvpp2_setup_bm_pool();
@@ -5766,7 +5979,7 @@ static int mvpp2_remove(struct platform_device *pdev)
for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
- mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
+ mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
}
for (i = 0; i < MVPP2_MAX_THREADS; i++) {