diff options
Diffstat (limited to 'drivers/net/ethernet/renesas')
 drivers/net/ethernet/renesas/ravb_main.c | 27
 drivers/net/ethernet/renesas/sh_eth.c    | 74
 2 files changed, 54 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 2b962d349f5f..009780df664b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)  	struct ravb_private *priv = netdev_priv(ndev);  	int ret = 0; -	if (priv->wol_enabled) { -		/* Reduce the usecount of the clock to zero and then -		 * restore it to its original value. This is done to force -		 * the clock to be re-enabled which is a workaround -		 * for renesas-cpg-mssr driver which do not enable clocks -		 * when resuming from PSCI suspend/resume. -		 * -		 * Without this workaround the driver fails to communicate -		 * with the hardware if WoL was enabled when the system -		 * entered PSCI suspend. This is due to that if WoL is enabled -		 * we explicitly keep the clock from being turned off when -		 * suspending, but in PSCI sleep power is cut so the clock -		 * is disabled anyhow, the clock driver is not aware of this -		 * so the clock is not turned back on when resuming. -		 * -		 * TODO: once the renesas-cpg-mssr suspend/resume is working -		 *       this clock dance should be removed. -		 */ -		clk_disable(priv->clk); -		clk_disable(priv->clk); -		clk_enable(priv->clk); -		clk_enable(priv->clk); - -		/* Set reset mode to rearm the WoL logic */ +	/* If WoL is enabled set reset mode to rearm the WoL logic */ +	if (priv->wol_enabled)  		ravb_write(ndev, CCC_OPC_RESET, CCC); -	}  	/* All register have been reset to default values.  	 
* Restore all registers which where setup at probe time and diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7e060aa9fbed..b9e2846589f8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {  	[FWNLCR0]	= 0x0090,  	[FWALCR0]	= 0x0094,  	[TXNLCR1]	= 0x00a0, -	[TXALCR1]	= 0x00a0, +	[TXALCR1]	= 0x00a4,  	[RXNLCR1]	= 0x00a8,  	[RXALCR1]	= 0x00ac,  	[FWNLCR1]	= 0x00b0, @@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {  	[FWNLCR0]	= 0x0090,  	[FWALCR0]	= 0x0094,  	[TXNLCR1]	= 0x00a0, -	[TXALCR1]	= 0x00a0, +	[TXALCR1]	= 0x00a4,  	[RXNLCR1]	= 0x00a8,  	[RXALCR1]	= 0x00ac,  	[FWNLCR1]	= 0x00b0, @@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)  			   entry, le32_to_cpu(txdesc->status));  		/* Free the original skb. */  		if (mdp->tx_skbuff[entry]) { -			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), +			dma_unmap_single(&mdp->pdev->dev, +					 le32_to_cpu(txdesc->addr),  					 le32_to_cpu(txdesc->len) >> 16,  					 DMA_TO_DEVICE);  			dev_kfree_skb_irq(mdp->tx_skbuff[entry]); @@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)  			if (mdp->rx_skbuff[i]) {  				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; -				dma_unmap_single(&ndev->dev, +				dma_unmap_single(&mdp->pdev->dev,  						 le32_to_cpu(rxdesc->addr),  						 ALIGN(mdp->rx_buf_sz, 32),  						 DMA_FROM_DEVICE);  			}  		}  		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; -		dma_free_coherent(NULL, ringsize, mdp->rx_ring, +		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,  				  mdp->rx_desc_dma);  		mdp->rx_ring = NULL;  	} @@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)  		sh_eth_tx_free(ndev, false);  		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; -		
dma_free_coherent(NULL, ringsize, mdp->tx_ring, +		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,  				  mdp->tx_desc_dma);  		mdp->tx_ring = NULL;  	} @@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)  		/* The size of the buffer is a multiple of 32 bytes. */  		buf_len = ALIGN(mdp->rx_buf_sz, 32); -		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, +		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,  					  DMA_FROM_DEVICE); -		if (dma_mapping_error(&ndev->dev, dma_addr)) { +		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {  			kfree_skb(skb);  			break;  		} @@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)  	/* Allocate all Rx descriptors. */  	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; -	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, -					  GFP_KERNEL); +	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, +					  &mdp->rx_desc_dma, GFP_KERNEL);  	if (!mdp->rx_ring)  		goto ring_free; @@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)  	/* Allocate all Tx descriptors. 
*/  	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; -	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, -					  GFP_KERNEL); +	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, +					  &mdp->tx_desc_dma, GFP_KERNEL);  	if (!mdp->tx_ring)  		goto ring_free;  	return 0; @@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  			mdp->rx_skbuff[entry] = NULL;  			if (mdp->cd->rpadir)  				skb_reserve(skb, NET_IP_ALIGN); -			dma_unmap_single(&ndev->dev, dma_addr, +			dma_unmap_single(&mdp->pdev->dev, dma_addr,  					 ALIGN(mdp->rx_buf_sz, 32),  					 DMA_FROM_DEVICE);  			skb_put(skb, pkt_len); @@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  			if (skb == NULL)  				break;	/* Better luck next round. */  			sh_eth_set_receive_align(skb); -			dma_addr = dma_map_single(&ndev->dev, skb->data, +			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,  						  buf_len, DMA_FROM_DEVICE); -			if (dma_mapping_error(&ndev->dev, dma_addr)) { +			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {  				kfree_skb(skb);  				break;  			} @@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)  		return PTR_ERR(phydev);  	} +	/* mask with MAC supported features */ +	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { +		int err = phy_set_max_speed(phydev, SPEED_100); +		if (err) { +			netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n"); +			phy_disconnect(phydev); +			return err; +		} +	} +  	phy_attached_info(phydev);  	return 0; @@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)  	/* soft swap. 
*/  	if (!mdp->cd->hw_swap)  		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); -	dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, +	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,  				  DMA_TO_DEVICE); -	if (dma_mapping_error(&ndev->dev, dma_addr)) { +	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {  		kfree_skb(skb);  		return NETDEV_TX_OK;  	} @@ -3214,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)  	/* ioremap the TSU registers */  	if (mdp->cd->tsu) {  		struct resource *rtsu; +  		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); -		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); -		if (IS_ERR(mdp->tsu_addr)) { -			ret = PTR_ERR(mdp->tsu_addr); +		if (!rtsu) { +			dev_err(&pdev->dev, "no TSU resource\n"); +			ret = -ENODEV; +			goto out_release; +		} +		/* We can only request the  TSU region  for the first port +		 * of the two  sharing this TSU for the probe to succeed... +		 */ +		if (devno % 2 == 0 && +		    !devm_request_mem_region(&pdev->dev, rtsu->start, +					     resource_size(rtsu), +					     dev_name(&pdev->dev))) { +			dev_err(&pdev->dev, "can't request TSU resource.\n"); +			ret = -EBUSY; +			goto out_release; +		} +		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, +					     resource_size(rtsu)); +		if (!mdp->tsu_addr) { +			dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); +			ret = -ENOMEM;  			goto out_release;  		}  		mdp->port = devno % 2;  		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;  	} -	/* initialize first or needed device */ -	if (!devno || pd->needs_init) { +	/* Need to init only the first port of the two sharing a TSU */ +	if (devno % 2 == 0) {  		if (mdp->cd->chip_reset)  			mdp->cd->chip_reset(ndev);  | 

