author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2018-07-23 15:21:39 +1000
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2018-07-23 15:21:39 +1000
commit | d5e748ff2b996d83489ac76c072e8b99f9ecef13 (patch)
tree | e2dfdf187d2200898be728b97a31843f3fc26808 /drivers/net/ethernet/cavium
parent | fea9cf321c916e9372874e6f2af1bf0b5beb89fb (diff)
parent | a7ca13826e478f9b201eb2f9f20de0b978a82ad9 (diff)
download | talos-op-linux-d5e748ff2b996d83489ac76c072e8b99f9ecef13.tar.gz, talos-op-linux-d5e748ff2b996d83489ac76c072e8b99f9ecef13.zip
Merge remote-tracking branch 'gpio/ib-aspeed' into upstream-ready
Merge the GPIO tree "ib-aspeed" topic branch, which contains prerequisites
for subsequent changes. This branch is also in the GPIO "next" tree.
Diffstat (limited to 'drivers/net/ethernet/cavium')
5 files changed, 47 insertions, 23 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index f044718cea52..a71dbb7ab6af 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -281,13 +281,12 @@ int octeon_init_droq(struct octeon_device *oct,
 		droq->max_count);
 
 	droq->recv_buf_list = (struct octeon_recv_buffer *)
-			      vzalloc_node(droq->max_count *
-						OCT_DROQ_RECVBUF_SIZE,
-						numa_node);
+			      vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
+					   numa_node);
 	if (!droq->recv_buf_list)
 		droq->recv_buf_list = (struct octeon_recv_buffer *)
-				      vzalloc(droq->max_count *
-						OCT_DROQ_RECVBUF_SIZE);
+				      vzalloc(array_size(droq->max_count,
+							 OCT_DROQ_RECVBUF_SIZE));
 	if (!droq->recv_buf_list) {
 		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
 		goto init_droq_fail;
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index b1270355b0b1..1f2e75da28f8 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -98,8 +98,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
 					numa_node);
 	if (!iq->request_list)
-		iq->request_list = vmalloc(sizeof(*iq->request_list) *
-					   num_descs);
+		iq->request_list =
+			vmalloc(array_size(num_descs,
+					   sizeof(*iq->request_list)));
 	if (!iq->request_list) {
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 448d1fafc827..f4d81765221e 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -325,6 +325,8 @@ struct nicvf {
 	struct tasklet_struct	qs_err_task;
 	struct work_struct	reset_task;
 	struct nicvf_work	rx_mode_work;
+	/* spinlock to protect workqueue arguments from concurrent access */
+	spinlock_t		rx_mode_wq_lock;
 
 	/* PTP timestamp */
 	struct cavium_ptp	*ptp_clock;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7135db45927e..135766c4296b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1923,17 +1923,12 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 	}
 }
 
-static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
+				     struct nicvf *nic)
 {
-	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-						  work.work);
-	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
 	union nic_mbx mbx = {};
 	int idx;
 
-	if (!vf_work)
-		return;
-
 	/* From the inside of VM code flow we have only 128 bits memory
 	 * available to send message to host's PF, so send all mc addrs
 	 * one by one, starting from flush command in case if kernel
@@ -1944,7 +1939,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
 	nicvf_send_msg_to_pf(nic, &mbx);
 
-	if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+	if (mode & BGX_XCAST_MCAST_FILTER) {
 		/* once enabling filtering, we need to signal to PF to add
 		 * its' own LMAC to the filter to accept packets for it.
 		 */
@@ -1954,23 +1949,46 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 	}
 
 	/* check if we have any specific MACs to be added to PF DMAC filter */
-	if (vf_work->mc) {
+	if (mc_addrs) {
 		/* now go through kernel list of MACs and add them one by one */
-		for (idx = 0; idx < vf_work->mc->count; idx++) {
+		for (idx = 0; idx < mc_addrs->count; idx++) {
 			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-			mbx.xcast.data.mac = vf_work->mc->mc[idx];
+			mbx.xcast.data.mac = mc_addrs->mc[idx];
 			nicvf_send_msg_to_pf(nic, &mbx);
 		}
-		kfree(vf_work->mc);
+		kfree(mc_addrs);
 	}
 
 	/* and finally set rx mode for PF accordingly */
 	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-	mbx.xcast.data.mode = vf_work->mode;
+	mbx.xcast.data.mode = mode;
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
 
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+						  work.work);
+	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+	u8 mode;
+	struct xcast_addr_list *mc;
+
+	if (!vf_work)
+		return;
+
+	/* Save message data locally to prevent them from
+	 * being overwritten by next ndo_set_rx_mode call().
+	 */
+	spin_lock(&nic->rx_mode_wq_lock);
+	mode = vf_work->mode;
+	mc = vf_work->mc;
+	vf_work->mc = NULL;
+	spin_unlock(&nic->rx_mode_wq_lock);
+
+	__nicvf_set_rx_mode_task(mode, mc, nic);
+}
+
 static void nicvf_set_rx_mode(struct net_device *netdev)
 {
 	struct nicvf *nic = netdev_priv(netdev);
 	struct netdev_hw_addr *ha;
@@ -2004,9 +2022,12 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
 			}
 		}
 	}
+	spin_lock(&nic->rx_mode_wq_lock);
+	kfree(nic->rx_mode_work.mc);
 	nic->rx_mode_work.mc = mc_list;
 	nic->rx_mode_work.mode = mode;
-	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
+	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+	spin_unlock(&nic->rx_mode_wq_lock);
 }
 
 static const struct net_device_ops nicvf_netdev_ops = {
@@ -2163,6 +2184,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	INIT_WORK(&nic->reset_task, nicvf_reset_task);
 	INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+	spin_lock_init(&nic->rx_mode_wq_lock);
 
 	err = register_netdev(netdev);
 	if (err) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d42704d07484..187a249ff2d1 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -292,8 +292,8 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 		rbdr->is_xdp = true;
 	}
 	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
-	rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
-				rbdr->pgcnt, GFP_KERNEL);
+	rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
+				GFP_KERNEL);
 	if (!rbdr->pgcache)
 		return -ENOMEM;
 	rbdr->pgidx = 0;
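The liquidio hunks above follow the treewide conversion of open-coded count * size
multiplications to the overflow-checked array_size() helper from <linux/overflow.h>.
As a rough standalone sketch of why this matters (demo_alloc_bufs() and struct
demo_buf are hypothetical, not part of this patch):

#include <linux/overflow.h>	/* array_size() */
#include <linux/types.h>
#include <linux/vmalloc.h>	/* vzalloc(), vfree() */

struct demo_buf {
	u8 data[2048];
};

/* Hypothetical helper: allocate 'count' zeroed buffers.
 *
 * A plain 'count * sizeof(struct demo_buf)' can wrap around
 * SIZE_MAX for a large, possibly attacker-influenced 'count' and
 * silently allocate a region that is far too small.  array_size()
 * saturates to SIZE_MAX on overflow, so vzalloc() fails cleanly
 * and we return NULL instead of handing back a short buffer.
 */
static struct demo_buf *demo_alloc_bufs(size_t count)
{
	return vzalloc(array_size(count, sizeof(struct demo_buf)));
}

The nicvf_queues.c hunk at the bottom applies the same idea through kcalloc(),
which performs the equivalent overflow check inside the slab allocator itself.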
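The thunderx changes show a common pattern for deferred work whose arguments the
submitter may rewrite at any time: both sides take a spinlock, the work handler
snapshots the data into locals and takes ownership of the pointer, then drops the
lock before doing the slow mailbox traffic. A minimal sketch of the same pattern
under assumed names (demo_ctx, demo_work_fn and demo_submit are illustrative, not
from the driver):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_list {
	int count;
	/* ... addresses ... */
};

struct demo_ctx {
	spinlock_t lock;	/* protects mode and list below */
	u8 mode;		/* latest requested mode */
	struct demo_list *list;	/* latest list, or NULL */
	struct delayed_work work;
};

/* Work handler: snapshot the arguments under the lock, NULL out the
 * pointer to take ownership, then process outside the lock, mirroring
 * nicvf_set_rx_mode_task() above.  Assumes ctx->lock was set up with
 * spin_lock_init() and ctx->work with INIT_DELAYED_WORK().
 */
static void demo_work_fn(struct work_struct *w)
{
	struct demo_ctx *ctx = container_of(to_delayed_work(w),
					    struct demo_ctx, work);
	struct demo_list *list;
	u8 mode;

	spin_lock(&ctx->lock);
	mode = ctx->mode;
	list = ctx->list;
	ctx->list = NULL;
	spin_unlock(&ctx->lock);

	/* slow processing of 'mode' and 'list' would happen here */
	kfree(list);
}

/* Submitter side: free any stale, not-yet-consumed list before
 * installing the new one, as nicvf_set_rx_mode() now does, so a
 * list overwritten before the work runs is never leaked.
 */
static void demo_submit(struct demo_ctx *ctx, u8 mode,
			struct demo_list *list)
{
	spin_lock(&ctx->lock);
	kfree(ctx->list);
	ctx->list = list;
	ctx->mode = mode;
	queue_delayed_work(system_wq, &ctx->work, 0);
	spin_unlock(&ctx->lock);
}

Note the patch also queues the work immediately (delay 0 instead of 2 * HZ); the
kfree() of the stale list under the lock is what now prevents a leak when a new
request arrives before the previous one has been consumed.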