Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c     |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c        |   2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c             |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c        | 250
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.h        |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c             |  16
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c            |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c              |   6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_reset.c    |   8
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c   |  16
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib.h              |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c      |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c          |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c         |  40
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c       |   2
16 files changed, 152 insertions, 220 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 8c81992fa6db..e4a73158fc7f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
/*
* Called by c2_probe to initialize the RNIC. This principally
- * involves initalizing the various limits and resouce pools that
+ * involves initializing the various limits and resource pools that
* comprise the RNIC instance.
*/
int __devinit c2_rnic_init(struct c2_dev *c2dev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 77b6b182778a..aaf88ef9409c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* T3A does 3 things when a TERM is received:
* 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
* 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
*
* For (1), we save the message in the qp for later consumer consumption.
* For (2), we move the QP into TERMINATE, post a QP event and disconnect.
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 51f42061dae9..6cfd4d8fd0bd 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1361,11 +1361,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct tid_info *t = dev->rdev.lldi.tids;
ep = lookup_tid(t, tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (!ep) {
printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
return 0;
}
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 53589000fd07..8615d7cf7e01 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -42,6 +42,7 @@
*/
#include <linux/slab.h>
+#include <linux/smpboot.h>
#include "ehca_classes.h"
#include "ehca_irq.h"
@@ -652,7 +653,7 @@ void ehca_tasklet_eq(unsigned long data)
ehca_process_eq((struct ehca_shca*)data, 1);
}
-static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
+static int find_next_online_cpu(struct ehca_comp_pool *pool)
{
int cpu;
unsigned long flags;
@@ -662,17 +663,20 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
ehca_dmp(cpu_online_mask, cpumask_size(), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
- cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_online_mask);
- pool->last_cpu = cpu;
+ do {
+ cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_online_mask);
+ pool->last_cpu = cpu;
+ } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
return cpu;
}
static void __queue_comp_task(struct ehca_cq *__cq,
- struct ehca_cpu_comp_task *cct)
+ struct ehca_cpu_comp_task *cct,
+ struct task_struct *thread)
{
unsigned long flags;
@@ -683,7 +687,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
__cq->nr_callbacks++;
list_add_tail(&__cq->entry, &cct->cq_list);
cct->cq_jobs++;
- wake_up(&cct->wait_queue);
+ wake_up_process(thread);
} else
__cq->nr_callbacks++;
@@ -695,6 +699,7 @@ static void queue_comp_task(struct ehca_cq *__cq)
{
int cpu_id;
struct ehca_cpu_comp_task *cct;
+ struct task_struct *thread;
int cq_jobs;
unsigned long flags;
@@ -702,7 +707,8 @@ static void queue_comp_task(struct ehca_cq *__cq)
BUG_ON(!cpu_online(cpu_id));
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
- BUG_ON(!cct);
+ thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
+ BUG_ON(!cct || !thread);
spin_lock_irqsave(&cct->task_lock, flags);
cq_jobs = cct->cq_jobs;
@@ -710,28 +716,25 @@ static void queue_comp_task(struct ehca_cq *__cq)
if (cq_jobs > 0) {
cpu_id = find_next_online_cpu(pool);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
- BUG_ON(!cct);
+ thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
+ BUG_ON(!cct || !thread);
}
-
- __queue_comp_task(__cq, cct);
+ __queue_comp_task(__cq, cct, thread);
}
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
struct ehca_cq *cq;
- unsigned long flags;
-
- spin_lock_irqsave(&cct->task_lock, flags);
while (!list_empty(&cct->cq_list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
- spin_unlock_irqrestore(&cct->task_lock, flags);
+ spin_unlock_irq(&cct->task_lock);
comp_event_callback(cq);
if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
- spin_lock_irqsave(&cct->task_lock, flags);
+ spin_lock_irq(&cct->task_lock);
spin_lock(&cq->task_lock);
cq->nr_callbacks--;
if (!cq->nr_callbacks) {
@@ -740,159 +743,76 @@ static void run_comp_task(struct ehca_cpu_comp_task *cct)
}
spin_unlock(&cq->task_lock);
}
-
- spin_unlock_irqrestore(&cct->task_lock, flags);
}
-static int comp_task(void *__cct)
+static void comp_task_park(unsigned int cpu)
{
- struct ehca_cpu_comp_task *cct = __cct;
- int cql_empty;
- DECLARE_WAITQUEUE(wait, current);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
- add_wait_queue(&cct->wait_queue, &wait);
-
- spin_lock_irq(&cct->task_lock);
- cql_empty = list_empty(&cct->cq_list);
- spin_unlock_irq(&cct->task_lock);
- if (cql_empty)
- schedule();
- else
- __set_current_state(TASK_RUNNING);
-
- remove_wait_queue(&cct->wait_queue, &wait);
+ struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ struct ehca_cpu_comp_task *target;
+ struct task_struct *thread;
+ struct ehca_cq *cq, *tmp;
+ LIST_HEAD(list);
- spin_lock_irq(&cct->task_lock);
- cql_empty = list_empty(&cct->cq_list);
- spin_unlock_irq(&cct->task_lock);
- if (!cql_empty)
- run_comp_task(__cct);
+ spin_lock_irq(&cct->task_lock);
+ cct->cq_jobs = 0;
+ cct->active = 0;
+ list_splice_init(&cct->cq_list, &list);
+ spin_unlock_irq(&cct->task_lock);
- set_current_state(TASK_INTERRUPTIBLE);
+ cpu = find_next_online_cpu(pool);
+ target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
+ spin_lock_irq(&target->task_lock);
+ list_for_each_entry_safe(cq, tmp, &list, entry) {
+ list_del(&cq->entry);
+ __queue_comp_task(cq, target, thread);
}
- __set_current_state(TASK_RUNNING);
-
- return 0;
-}
-
-static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
- int cpu)
-{
- struct ehca_cpu_comp_task *cct;
-
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- spin_lock_init(&cct->task_lock);
- INIT_LIST_HEAD(&cct->cq_list);
- init_waitqueue_head(&cct->wait_queue);
- cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
- "ehca_comp/%d", cpu);
-
- return cct->task;
+ spin_unlock_irq(&target->task_lock);
}
-static void destroy_comp_task(struct ehca_comp_pool *pool,
- int cpu)
+static void comp_task_stop(unsigned int cpu, bool online)
{
- struct ehca_cpu_comp_task *cct;
- struct task_struct *task;
- unsigned long flags_cct;
-
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
- spin_lock_irqsave(&cct->task_lock, flags_cct);
+ struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- task = cct->task;
- cct->task = NULL;
+ spin_lock_irq(&cct->task_lock);
cct->cq_jobs = 0;
-
- spin_unlock_irqrestore(&cct->task_lock, flags_cct);
-
- if (task)
- kthread_stop(task);
+ cct->active = 0;
+ WARN_ON(!list_empty(&cct->cq_list));
+ spin_unlock_irq(&cct->task_lock);
}
-static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
+static int comp_task_should_run(unsigned int cpu)
{
struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- LIST_HEAD(list);
- struct ehca_cq *cq;
- unsigned long flags_cct;
-
- spin_lock_irqsave(&cct->task_lock, flags_cct);
-
- list_splice_init(&cct->cq_list, &list);
-
- while (!list_empty(&list)) {
- cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-
- list_del(&cq->entry);
- __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
- }
-
- spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+ return cct->cq_jobs;
}
-static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static void comp_task(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- struct ehca_cpu_comp_task *cct;
+ struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
+ int cql_empty;
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
- if (!create_comp_task(pool, cpu)) {
- ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
- return notifier_from_errno(-ENOMEM);
- }
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- kthread_bind(cct->task, cpumask_any(cpu_online_mask));
- destroy_comp_task(pool, cpu);
- break;
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- kthread_bind(cct->task, cpu);
- wake_up_process(cct->task);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
- break;
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
- destroy_comp_task(pool, cpu);
- take_over_work(pool, cpu);
- break;
+ spin_lock_irq(&cct->task_lock);
+ cql_empty = list_empty(&cct->cq_list);
+ if (!cql_empty) {
+ __set_current_state(TASK_RUNNING);
+ run_comp_task(cct);
}
-
- return NOTIFY_OK;
+ spin_unlock_irq(&cct->task_lock);
}
-static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
- .notifier_call = comp_pool_callback,
- .priority = 0,
+static struct smp_hotplug_thread comp_pool_threads = {
+ .thread_should_run = comp_task_should_run,
+ .thread_fn = comp_task,
+ .thread_comm = "ehca_comp/%u",
+ .cleanup = comp_task_stop,
+ .park = comp_task_park,
};
int ehca_create_comp_pool(void)
{
- int cpu;
- struct task_struct *task;
+ int cpu, ret = -ENOMEM;
if (!ehca_scaling_code)
return 0;
@@ -905,38 +825,46 @@ int ehca_create_comp_pool(void)
pool->last_cpu = cpumask_any(cpu_online_mask);
pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
- if (pool->cpu_comp_tasks == NULL) {
- kfree(pool);
- return -EINVAL;
- }
+ if (!pool->cpu_comp_tasks)
+ goto out_pool;
- for_each_online_cpu(cpu) {
- task = create_comp_task(pool, cpu);
- if (task) {
- kthread_bind(task, cpu);
- wake_up_process(task);
- }
+ pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
+ if (!pool->cpu_comp_threads)
+ goto out_tasks;
+
+ for_each_present_cpu(cpu) {
+ struct ehca_cpu_comp_task *cct;
+
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ spin_lock_init(&cct->task_lock);
+ INIT_LIST_HEAD(&cct->cq_list);
}
- register_hotcpu_notifier(&comp_pool_callback_nb);
+ comp_pool_threads.store = pool->cpu_comp_threads;
+ ret = smpboot_register_percpu_thread(&comp_pool_threads);
+ if (ret)
+ goto out_threads;
- printk(KERN_INFO "eHCA scaling code enabled\n");
+ pr_info("eHCA scaling code enabled\n");
+ return ret;
- return 0;
+out_threads:
+ free_percpu(pool->cpu_comp_threads);
+out_tasks:
+ free_percpu(pool->cpu_comp_tasks);
+out_pool:
+ kfree(pool);
+ return ret;
}
void ehca_destroy_comp_pool(void)
{
- int i;
-
if (!ehca_scaling_code)
return;
- unregister_hotcpu_notifier(&comp_pool_callback_nb);
-
- for_each_online_cpu(i)
- destroy_comp_task(pool, i);
+ smpboot_unregister_percpu_thread(&comp_pool_threads);
+ free_percpu(pool->cpu_comp_threads);
free_percpu(pool->cpu_comp_tasks);
kfree(pool);
}
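
Note: the ehca change above replaces an open-coded per-CPU kthread pool and its
CPU-hotplug notifier with the generic smpboot infrastructure. As a rough sketch
of that API, assuming only a simple per-CPU "pending work" flag (all "demo"
names are hypothetical, not part of this patch): a driver fills in a
struct smp_hotplug_thread and registers it once, and the core then creates,
binds, parks and unparks one thread per CPU.

	#include <linux/percpu.h>
	#include <linux/smpboot.h>

	static DEFINE_PER_CPU(struct task_struct *, demo_thread);
	static DEFINE_PER_CPU(int, demo_pending);

	/* return nonzero when this CPU's thread has work to do */
	static int demo_should_run(unsigned int cpu)
	{
		return per_cpu(demo_pending, cpu);
	}

	/* runs in the per-CPU thread, already bound to @cpu */
	static void demo_fn(unsigned int cpu)
	{
		per_cpu(demo_pending, cpu) = 0;
		/* ... drain the per-CPU work here ... */
	}

	static struct smp_hotplug_thread demo_threads = {
		.store			= &demo_thread,
		.thread_should_run	= demo_should_run,
		.thread_fn		= demo_fn,
		.thread_comm		= "demo/%u",
		/* optional .park/.cleanup hooks handle hotplug, as ehca uses above */
	};

	/* e.g. from module init: */
	/* ret = smpboot_register_percpu_thread(&demo_threads); */
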
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
index 3346cb06cea6..5370199f08c7 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.h
+++ b/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -58,15 +58,15 @@ void ehca_tasklet_eq(unsigned long data);
void ehca_process_eq(struct ehca_shca *shca, int is_irq);
struct ehca_cpu_comp_task {
- wait_queue_head_t wait_queue;
struct list_head cq_list;
- struct task_struct *task;
spinlock_t task_lock;
int cq_jobs;
+ int active;
};
struct ehca_comp_pool {
- struct ehca_cpu_comp_task *cpu_comp_tasks;
+ struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
+ struct task_struct * __percpu *cpu_comp_threads;
int last_cpu;
spinlock_t last_cpu_lock;
};
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c27141fef1ab..9c2ae7efd00f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
struct ib_ah *new_ah;
struct ib_ah_attr ah_attr;
+ unsigned long flags;
if (!dev->send_agent[port_num - 1][0])
return;
@@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
if (IS_ERR(new_ah))
return;
- spin_lock(&dev->sm_lock);
+ spin_lock_irqsave(&dev->sm_lock, flags);
if (dev->sm_ah[port_num - 1])
ib_destroy_ah(dev->sm_ah[port_num - 1]);
dev->sm_ah[port_num - 1] = new_ah;
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
}
/*
@@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
static void node_desc_override(struct ib_device *dev,
struct ib_mad *mad)
{
+ unsigned long flags;
+
if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
- spin_lock(&to_mdev(dev)->sm_lock);
+ spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
- spin_unlock(&to_mdev(dev)->sm_lock);
+ spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
}
}
@@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
struct ib_mad_send_buf *send_buf;
struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
int ret;
+ unsigned long flags;
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
* wrong following the IB spec strictly, but we know
* it's OK for our devices).
*/
- spin_lock(&dev->sm_lock);
+ spin_lock_irqsave(&dev->sm_lock, flags);
memcpy(send_buf->mad, mad, sizeof *mad);
if ((send_buf->ah = dev->sm_ah[port_num - 1]))
ret = ib_post_send_mad(send_buf, NULL);
else
ret = -EINVAL;
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
if (ret)
ib_free_send_mad(send_buf);
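
Note: the sm_lock conversions in this file (and in mlx4/main.c below) switch
from plain spin_lock() to the irqsave variant, so the lock may be taken no
matter whether the caller runs with interrupts enabled. A minimal sketch of
the pattern, assuming only that the lock is shared with interrupt-time users:

	unsigned long flags;

	/* disables local interrupts and remembers their prior state, so
	 * this cannot deadlock against an interrupt taken on this CPU
	 * that tries to grab the same lock */
	spin_lock_irqsave(&dev->sm_lock, flags);
	/* ... touch state shared with interrupt context ... */
	spin_unlock_irqrestore(&dev->sm_lock, flags);
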
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fe2088cfa6ee..cc05579ebce7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
{
struct mlx4_cmd_mailbox *mailbox;
+ unsigned long flags;
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP;
@@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
return 0;
- spin_lock(&to_mdev(ibdev)->sm_lock);
+ spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
memcpy(ibdev->node_desc, props->node_desc, 64);
- spin_unlock(&to_mdev(ibdev)->sm_lock);
+ spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
/*
* If possible, pass node desc to FW, so it can generate
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a6d8ea060ea8..f585eddef4b7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ struct net_device *ndev;
union ib_gid sgid;
u16 pkey;
int send_size;
@@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
/* FIXME: cache smac value? */
- smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+ ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
+ if (!ndev)
+ return -ENODEV;
+ smac = ndev->dev_addr;
memcpy(sqp->ud_header.eth.smac_h, smac, 6);
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 4fa3534ec233..74c6a9426047 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -241,16 +241,16 @@ good:
if (hca_pcie_cap) {
devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
- if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL,
- devctl)) {
+ if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL,
+ devctl)) {
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
goto out;
}
linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
- if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL,
- linkctl)) {
+ if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
+ linkctl)) {
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5a044526e4f4..c4e0131f1b57 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
ocrdma_get_guid(dev, &sgid->raw[8]);
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
struct net_device *netdev, *tmp;
@@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
return 0;
}
-#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)
+#if IS_ENABLED(CONFIG_IPV6)
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct net_device *event_netdev = ifa->idev->dev;
- struct net_device *netdev = NULL;
+ struct net_device *netdev = ifa->idev->dev;
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
@@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
bool is_vlan = false;
u16 vid = 0;
- netdev = vlan_dev_real_dev(event_netdev);
- if (netdev != event_netdev) {
- is_vlan = true;
- vid = vlan_dev_vlan_id(event_netdev);
+ is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
+ if (is_vlan) {
+ vid = vlan_dev_vlan_id(netdev);
+ netdev = vlan_dev_real_dev(netdev);
}
+
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
if (dev->nic_info.netdev == netdev) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index cb5b7f7d4d38..b29a4246ef41 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -2219,7 +2219,6 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
u32 wqe_idx;
if (!qp->wqe_wr_id_tbl[tail].signaled) {
- expand = true; /* CQE cannot be consumed yet */
*polled = false; /* WC cannot be consumed yet */
} else {
ibwc->status = IB_WC_SUCCESS;
@@ -2227,10 +2226,11 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
ibwc->qp = &qp->ibqp;
ocrdma_update_wc(qp, ibwc, tail);
*polled = true;
- wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
- if (tail != wqe_idx)
- expand = true; /* Coalesced CQE can't be consumed yet */
}
+ wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+ if (tail != wqe_idx)
+ expand = true; /* Coalesced CQE can't be consumed yet */
+
ocrdma_hwq_inc_tail(&qp->sq);
return expand;
}
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7b1b86690024..4d11575c2010 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -87,7 +87,7 @@ struct qlogic_ib_stats {
};
extern struct qlogic_ib_stats qib_stats;
-extern struct pci_error_handlers qib_pci_err_handler;
+extern const struct pci_error_handlers qib_pci_err_handler;
extern struct pci_driver qib_driver;
#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 0d7280af99bc..3f6b21e9dc11 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
dd->piobcnt4k * dd->align4k;
dd->piovl15base = ioremap_nocache(vl15off,
NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base)
+ if (!dd->piovl15base) {
+ ret = -ENOMEM;
goto bail;
+ }
}
qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 19f1e6c45fb6..ccb119143d20 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -471,9 +471,10 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (port_num != port) {
ibp = to_iport(ibdev, port_num);
ret = check_mkey(ibp, smp, 0);
- if (ret)
+ if (ret) {
ret = IB_MAD_RESULT_FAILURE;
goto bail;
+ }
}
}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 062c301ebf53..c574ec7c85e6 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -273,10 +273,9 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
struct qib_msix_entry *entry)
{
u16 linkstat, speed;
- int pos = 0, pose, ret = 1;
+ int pos = 0, ret = 1;
- pose = pci_pcie_cap(dd->pcidev);
- if (!pose) {
+ if (!pci_is_pcie(dd->pcidev)) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
dd->lbus_width = 1;
@@ -298,7 +297,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
if (!pos)
qib_enable_intx(dd->pcidev);
- pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
/*
* speed is bits 0-3, linkwidth is bits 4-8
* no defines for them in headers
@@ -516,7 +515,6 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
int r;
struct pci_dev *parent;
- int ppos;
u16 devid;
u32 mask, bits, val;
@@ -529,8 +527,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
return 1;
}
- ppos = pci_pcie_cap(parent);
- if (!ppos)
+ if (!pci_is_pcie(parent))
return 1;
if (parent->vendor != 0x8086)
return 1;
@@ -587,7 +584,6 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
{
int ret = 1; /* Assume the worst */
struct pci_dev *parent;
- int ppos, epos;
u16 pcaps, pctl, ecaps, ectl;
int rc_sup, ep_sup;
int rc_cur, ep_cur;
@@ -598,19 +594,15 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
goto bail;
}
- ppos = pci_pcie_cap(parent);
- if (ppos) {
- pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
- pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
- } else
+
+ if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
goto bail;
+ pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps);
+ pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl);
/* Find out supported and configured values for endpoint (us) */
- epos = pci_pcie_cap(dd->pcidev);
- if (epos) {
- pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
- pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
- } else
- goto bail;
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+
ret = 0;
/* Find max payload supported by root, endpoint */
rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
@@ -629,14 +621,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
rc_cur = rc_sup;
pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
- pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
}
/* If less than (allowed, supported), bump endpoint payload */
if (rc_sup > ep_cur) {
ep_cur = rc_sup;
ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
- pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
}
/*
@@ -654,13 +646,13 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
rc_cur = rc_sup;
pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
- pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
}
if (rc_sup > ep_cur) {
ep_cur = rc_sup;
ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
- pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
}
bail:
return ret;
@@ -753,7 +745,7 @@ qib_pci_resume(struct pci_dev *pdev)
qib_init(dd, 1); /* same as re-init after reset */
}
-struct pci_error_handlers qib_pci_err_handler = {
+const struct pci_error_handlers qib_pci_err_handler = {
.error_detected = qib_pci_error_detected,
.mmio_enabled = qib_pci_mmio_enabled,
.link_reset = qib_pci_link_reset,
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index a322d5171a2c..50a8a0d4fe67 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
/* Read CTRL reg for each channel to check TRIMDONE */
if (baduns & (1 << chn)) {
qib_dev_err(dd,
- "Reseting TRIMDONE on chn %d (%s)\n",
+ "Resetting TRIMDONE on chn %d (%s)\n",
chn, where);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);