Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	111
1 file changed, 56 insertions(+), 55 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2c277b8aba38..81befd0c2510 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5491,9 +5491,29 @@ static void flush_all_backlogs(void)
 	put_online_cpus();
 }
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+	if (!napi->rx_count)
+		return;
+	netif_receive_skb_list_internal(&napi->rx_list);
+	INIT_LIST_HEAD(&napi->rx_list);
+	napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+	list_add_tail(&skb->list, &napi->rx_list);
+	if (++napi->rx_count >= gro_normal_batch)
+		gro_normal_list(napi);
+}
+
 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
-static int napi_gro_complete(struct sk_buff *skb)
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
@@ -5526,7 +5546,8 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 out:
-	return netif_receive_skb_internal(skb);
+	gro_normal_one(napi, skb);
+	return NET_RX_SUCCESS;
 }
 
 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@ -5539,7 +5560,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
 			return;
 		skb_list_del_init(skb);
-		napi_gro_complete(skb);
+		napi_gro_complete(napi, skb);
 		napi->gro_hash[index].count--;
 	}
 
@@ -5641,7 +5662,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
 	}
 }
 
-static void gro_flush_oldest(struct list_head *head)
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 {
 	struct sk_buff *oldest;
 
@@ -5657,7 +5678,7 @@ static void gro_flush_oldest(struct list_head *head)
 	 * SKB to the chain.
 	 */
 	skb_list_del_init(oldest);
-	napi_gro_complete(oldest);
+	napi_gro_complete(napi, oldest);
 }
 
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
@@ -5733,7 +5754,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 	if (pp) {
 		skb_list_del_init(pp);
-		napi_gro_complete(pp);
+		napi_gro_complete(napi, pp);
 		napi->gro_hash[hash].count--;
 	}
 
@@ -5744,7 +5765,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		goto normal;
 
 	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
-		gro_flush_oldest(gro_head);
+		gro_flush_oldest(napi, gro_head);
 	} else {
 		napi->gro_hash[hash].count++;
 	}
@@ -5802,26 +5823,6 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-	if (!napi->rx_count)
-		return;
-	netif_receive_skb_list_internal(&napi->rx_list);
-	INIT_LIST_HEAD(&napi->rx_list);
-	napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-	list_add_tail(&skb->list, &napi->rx_list);
-	if (++napi->rx_count >= gro_normal_batch)
-		gro_normal_list(napi);
-}
-
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
 	skb_dst_drop(skb);
@@ -6200,8 +6201,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 				 NAPIF_STATE_IN_BUSY_POLL)))
 		return false;
 
-	gro_normal_list(n);
-
 	if (n->gro_bitmask) {
 		unsigned long timeout = 0;
 
@@ -6217,6 +6216,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 			hrtimer_start(&n->timer, ns_to_ktime(timeout),
 				      HRTIMER_MODE_REL_PINNED);
 	}
+
+	gro_normal_list(n);
+
 	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
 		local_irq_save(flags);
@@ -6548,8 +6550,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		goto out_unlock;
 	}
 
-	gro_normal_list(n);
-
 	if (n->gro_bitmask) {
 		/* flush too old packets
 		 * If HZ < 1000, flush all packets.
@@ -6557,6 +6557,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		napi_gro_flush(n, HZ >= 1000);
 	}
+
+	gro_normal_list(n);
+
 	/* Some drivers may have called napi_schedule
 	 * prior to exhausting their budget.
 	 */
@@ -8194,6 +8196,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(__dev_set_mtu);
 
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+		     struct netlink_ext_ack *extack)
+{
+	/* MTU must be positive, and in range */
+	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+		return -EINVAL;
+	}
+
+	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /**
  *	dev_set_mtu_ext - Change maximum transfer unit
  *	@dev: device
@@ -8210,16 +8228,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
 	if (new_mtu == dev->mtu)
 		return 0;
 
-	/* MTU must be positive, and in range */
-	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
-		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
-		return -EINVAL;
-	}
-
-	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
-		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
-		return -EINVAL;
-	}
+	err = dev_validate_mtu(dev, new_mtu, extack);
+	if (err)
+		return err;
 
 	if (!netif_device_present(dev))
 		return -ENODEV;
@@ -9177,22 +9188,10 @@ static void netdev_unregister_lockdep_key(struct net_device *dev)
 
 void netdev_update_lockdep_key(struct net_device *dev)
 {
-	struct netdev_queue *queue;
-	int i;
-
-	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
 	lockdep_unregister_key(&dev->addr_list_lock_key);
-
-	lockdep_register_key(&dev->qdisc_xmit_lock_key);
 	lockdep_register_key(&dev->addr_list_lock_key);
 
 	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		queue = netdev_get_tx_queue(dev, i);
-
-		lockdep_set_class(&queue->_xmit_lock,
-				  &dev->qdisc_xmit_lock_key);
-	}
 }
 EXPORT_SYMBOL(netdev_update_lockdep_key);
 
@@ -9314,8 +9313,10 @@ int register_netdevice(struct net_device *dev)
 		goto err_uninit;
 
 	ret = netdev_register_kobject(dev);
-	if (ret)
+	if (ret) {
+		dev->reg_state = NETREG_UNREGISTERED;
 		goto err_uninit;
+	}
 	dev->reg_state = NETREG_REGISTERED;
 
 	__netdev_update_features(dev);
@@ -10165,7 +10166,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
 static int __net_init netdev_init(struct net *net)
 {
 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
-		     8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
+		     8 * sizeof_field(struct napi_struct, gro_bitmask));
 
 	if (net != &init_net)
 		INIT_LIST_HEAD(&net->dev_base_head);
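
Why the gro_normal_list() calls move below napi_gro_flush(): once napi_gro_complete() hands finished super-packets to gro_normal_one(), flushing the GRO hash can itself append SKBs to napi->rx_list, so the list must be drained after the flush or those SKBs would linger until the next poll and be delivered out of order relative to non-GRO'd traffic. A minimal sketch of where the batching shows up from a driver's point of view (example_poll() and example_clean_rx() are hypothetical names, not part of this patch):

#include <linux/netdevice.h>

/* Stands in for the driver's RX ring handler; assumed to call
 * napi_gro_receive() once per received frame.
 */
static int example_clean_rx(struct napi_struct *napi, int budget);

static int example_poll(struct napi_struct *napi, int budget)
{
	/* Each napi_gro_receive() either merges the SKB into a GRO flow
	 * or batches it on napi->rx_list via gro_normal_one(); the batch
	 * is passed up every gro_normal_batch SKBs.
	 */
	int work = example_clean_rx(napi, budget);

	if (work < budget)
		/* With this patch, napi_complete_done() runs
		 * napi_gro_flush() first (which may add more SKBs to
		 * rx_list through napi_gro_complete()), and only then
		 * gro_normal_list(), so everything goes up in order.
		 */
		napi_complete_done(napi, work);

	return work;
}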
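dev_validate_mtu() factors the range check out of dev_set_mtu_ext() without changing its behaviour, so other code can reject a bad MTU before a device is registered. A hedged sketch of such a caller (example_register() is hypothetical, and it assumes dev_validate_mtu() has been declared somewhere visible to the caller, which this diff does not show):

#include <linux/netdevice.h>
#include <linux/netlink.h>

/* Hypothetical: validate a requested MTU up front so a bad value
 * fails early with an extack message, instead of registering a
 * device whose MTU is outside [min_mtu, max_mtu].
 */
static int example_register(struct net_device *dev, int req_mtu,
			    struct netlink_ext_ack *extack)
{
	int err;

	err = dev_validate_mtu(dev, req_mtu, extack);
	if (err)
		return err;		/* extack message already set */

	dev->mtu = req_mtu;		/* in range by construction */
	return register_netdevice(dev);
}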

