author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-26 12:13:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-26 12:13:57 -0700
commit     4c171acc20794af16a27da25e11ec4e9cad5d9fa (patch)
tree       fb097384d709b7bda982902d999f658bb4f07b2c /drivers/infiniband
parent     20e0ec119b2c6cc412addefbe169f4f5e38701e8 (diff)
parent     8dc4abdf4c82d0e1c47f14b6615406d31975ea66 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA/cma: Save PID of ID's owner
RDMA/cma: Add support for netlink statistics export
RDMA/cma: Pass QP type into rdma_create_id()
RDMA: Update exported headers list
RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
RDMA/nes: Add a check for strict_strtoul()
RDMA/cxgb3: Don't post zero-byte read if endpoint is going away
RDMA/cxgb4: Use completion objects for event blocking
IB/srp: Fix integer -> pointer cast warnings
IB: Add devnode methods to cm_class and umad_class
IB/mad: Return EPROTONOSUPPORT when an RDMA device lacks the QP required
IB/uverbs: Add devnode method to set path/mode
RDMA/ucma: Add .nodename/.mode to tell userspace where to create device node
RDMA: Add netlink infrastructure
RDMA: Add error handling to ib_core_init()
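The most invasive API change in this pull is the extra QP-type argument to rdma_create_id(). A minimal before/after sketch for a caller, assuming a hypothetical handler and context (only the four-argument signature and the RDMA_PS_TCP/IB_QPT_RC pairing are taken from the diff below):

	#include <rdma/rdma_cm.h>

	static int example_cma_handler(struct rdma_cm_id *id,
				       struct rdma_cm_event *event)
	{
		return 0;	/* returning non-zero destroys the CM ID */
	}

	static struct rdma_cm_id *example_create_tcp_id(void *ctx)
	{
		/* before this merge: rdma_create_id(handler, ctx, RDMA_PS_TCP) */
		return rdma_create_id(example_cma_handler, ctx, RDMA_PS_TCP,
				      IB_QPT_RC);
	}

In-tree users follow the same pattern: iSER and the iWARP connection path pass IB_QPT_RC for RDMA_PS_TCP, while the UDP and IPoIB port spaces map to IB_QPT_UD (see ucma_get_qp_type() in the ucma.c hunk).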
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/Kconfig                     1
-rw-r--r--  drivers/infiniband/core/Makefile               2
-rw-r--r--  drivers/infiniband/core/cm.c                   8
-rw-r--r--  drivers/infiniband/core/cma.c                308
-rw-r--r--  drivers/infiniband/core/device.c              25
-rw-r--r--  drivers/infiniband/core/mad.c                  7
-rw-r--r--  drivers/infiniband/core/netlink.c            190
-rw-r--r--  drivers/infiniband/core/ucma.c                35
-rw-r--r--  drivers/infiniband/core/user_mad.c             7
-rw-r--r--  drivers/infiniband/core/uverbs_main.c          8
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c         26
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h    2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c          6
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h        18
-rw-r--r--  drivers/infiniband/hw/nes/nes.c                4
-rw-r--r--  drivers/infiniband/hw/qib/Kconfig              2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c       2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c            4
18 files changed, 505 insertions(+), 150 deletions(-)
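The netlink plumbing added below is consumed by registering a per-client callback table. A sketch of a hypothetical in-kernel client, modeled on the RDMA CM's own registration in cma_init()/cma_cleanup(); the constants shown are the RDMA CM's, reused purely for illustration, and a real second client would allocate its own index and op count:

	#include <rdma/rdma_netlink.h>

	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		/* emit one netlink message per object, as cma_get_id_stats() does */
		return skb->len;
	}

	static const struct ibnl_client_cbs example_cb_table[] = {
		[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = example_dump },
	};

	static int __init example_init(void)
	{
		/* the index must be unique; ibnl_add_client() rejects duplicates */
		return ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS,
				       example_cb_table);
	}

	static void __exit example_exit(void)
	{
		ibnl_remove_client(RDMA_NL_RDMA_CM);
	}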
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6e35eccc9caa..0f9a84c1046a 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -2,6 +2,7 @@ menuconfig INFINIBAND
 	tristate "InfiniBand support"
 	depends on PCI || BROKEN
 	depends on HAS_IOMEM
+	depends on NET
 	---help---
 	  Core support for InfiniBand (IB).  Make sure to also select
 	  any protocols you wish to use as well as drivers for your
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index cb1ab3ea4998..c8bbaef1becb 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
 					$(user_access-y)
 
 ib_core-y :=			packer.o ud_header.o verbs.o sysfs.o \
-				device.o fmr_pool.o cache.o
+				device.o fmr_pool.o cache.o netlink.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 
 ib_mad-y :=			mad.o smi.o agent.o mad_rmpp.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f804e28e1ebb..f62f52fb9ece 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
 	.release	= cm_release_port_obj
 };
 
+static char *cm_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = 0666;
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 struct class cm_class = {
+	.owner   = THIS_MODULE,
 	.name    = "infiniband_cm",
+	.devnode = cm_devnode,
 };
 EXPORT_SYMBOL(cm_class);
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 99dde874fbbd..b6a33b3c516d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -47,6 +47,7 @@
 
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
 	struct list_head	id_list;
 };
 
-enum cma_state {
-	CMA_IDLE,
-	CMA_ADDR_QUERY,
-	CMA_ADDR_RESOLVED,
-	CMA_ROUTE_QUERY,
-	CMA_ROUTE_RESOLVED,
-	CMA_CONNECT,
-	CMA_DISCONNECT,
-	CMA_ADDR_BOUND,
-	CMA_LISTEN,
-	CMA_DEVICE_REMOVAL,
-	CMA_DESTROYING
-};
-
 struct rdma_bind_list {
 	struct idr		*ps;
 	struct hlist_head	owners;
@@ -126,7 +113,7 @@ struct rdma_id_private {
 	struct list_head	mc_list;
 
 	int			internal_id;
-	enum cma_state		state;
+	enum rdma_cm_state	state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
@@ -146,6 +133,7 @@ struct rdma_id_private {
 	u32			seq_num;
 	u32			qkey;
 	u32			qp_num;
+	pid_t			owner;
 	u8			srq;
 	u8			tos;
 	u8			reuseaddr;
@@ -165,8 +153,8 @@ struct cma_multicast {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
-	enum cma_state		old_state;
-	enum cma_state		new_state;
+	enum rdma_cm_state	old_state;
+	enum rdma_cm_state	new_state;
 	struct rdma_cm_event	event;
 };
@@ -217,7 +205,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
 	unsigned long flags;
 	int ret;
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-			 enum cma_state comp, enum cma_state exch)
+			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
 	unsigned long flags;
 	int ret;
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-			       enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+				   enum rdma_cm_state exch)
 {
 	unsigned long flags;
-	enum cma_state old;
+	enum rdma_cm_state old;
 
 	spin_lock_irqsave(&id_priv->lock, flags);
 	old = id_priv->state;
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
 	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
 }
 
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
-	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 			      struct cma_device *cma_dev)
 {
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-				enum cma_state state)
+				enum rdma_cm_state state)
 {
 	mutex_lock(&id_priv->handler_mutex);
 	if (id_priv->state != state) {
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
 }
 
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-				  void *context, enum rdma_port_space ps)
+				  void *context, enum rdma_port_space ps,
+				  enum ib_qp_type qp_type)
 {
 	struct rdma_id_private *id_priv;
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	id_priv->state = CMA_IDLE;
+	id_priv->owner = task_pid_nr(current);
+	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
+	id_priv->id.qp_type = qp_type;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	if (IS_ERR(qp))
 		return PTR_ERR(qp);
 
-	if (cma_is_ud_ps(id_priv->id.ps))
+	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
 		ret = cma_init_conn_qp(id_priv, qp);
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	qp_attr->port_num = id_priv->id.port_num;
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
-	if (cma_is_ud_ps(id_priv->id.ps)) {
+	if (id_priv->id.qp_type == IB_QPT_UD) {
 		ret = cma_set_qkey(id_priv);
 		if (ret)
 			return ret;
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	id_priv = container_of(id, struct rdma_id_private, id);
 	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-				 enum cma_state state)
+				 enum rdma_cm_state state)
 {
 	switch (state) {
-	case CMA_ADDR_QUERY:
+	case RDMA_CM_ADDR_QUERY:
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
-	case CMA_ROUTE_QUERY:
+	case RDMA_CM_ROUTE_QUERY:
 		cma_cancel_route(id_priv);
 		break;
-	case CMA_LISTEN:
+	case RDMA_CM_LISTEN:
 		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
 				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
 	struct rdma_id_private *id_priv;
-	enum cma_state state;
+	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	state = cma_exch(id_priv, CMA_DESTROYING);
+	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
 	/*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int ret = 0;
 
 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_CONNECT)) ||
+		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_DISCONNECT)))
+		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
-		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+				   RDMA_CM_DISCONNECT))
 			goto out;
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		goto err;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
 		goto err;
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
 destroy_id:
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	int ret;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, IB_QPT_UD);
 	if (IS_ERR(id))
 		return NULL;
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	}
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 err:
 	rdma_destroy_id(id);
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (cma_is_ud_ps(listen_id->id.ps)) {
+	if (listen_id->id.qp_type == IB_QPT_UD) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, CMA_CONNECT) &&
-	    !cma_is_ud_ps(conn_id->id.ps))
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-	cma_exch(conn_id, CMA_DESTROYING);
+	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
-				   RDMA_PS_TCP);
+				   RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	conn_id->state = CMA_CONNECT;
+	conn_id->state = RDMA_CM_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
+		cma_exch(conn_id, RDMA_CM_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
 		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;
 
-	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+			    id_priv->id.qp_type);
 	if (IS_ERR(id))
 		return;
 
 	dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-	dev_id_priv->state = CMA_ADDR_BOUND;
+	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
 	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		route->num_paths = 1;
 		*route->path_rec = *path_rec;
 	} else {
-		work->old_state = CMA_ROUTE_QUERY;
-		work->new_state = CMA_ADDR_RESOLVED;
+		work->old_state = RDMA_CM_ROUTE_QUERY;
+		work->new_state = RDMA_CM_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
 		work->event.status = status;
 	}
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	int destroy = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == CMA_DESTROYING ||
-	    id_priv->state == CMA_DEVICE_REMOVAL)
+	if (id_priv->state == RDMA_CM_DESTROYING ||
+	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
 	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_RESOLVED))
 		return -EINVAL;
 
 	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	id->route.num_paths = num_paths;
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	work->event.status = 0;
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	memset(&event, 0, sizeof event);
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ADDR_QUERY;
-	work->new_state = CMA_ADDR_RESOLVED;
+	work->old_state = RDMA_CM_ADDR_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		id_priv->reuseaddr = reuse;
 		ret = 0;
 	} else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state == CMA_LISTEN) ||
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
 		    !reuseaddr || !cur_id->reuseaddr) {
 			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
 			if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
 		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 		return -EINVAL;
 
 	if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	return 0;
 err:
 	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		return -EAFNOSUPPORT;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
 	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
 	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
 err1:
-	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp) {
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+
+	id_priv->owner = task_pid_nr(current);
+
+	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp && conn_param) {
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 						conn_param->private_data,
 						conn_param->private_data_len);
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
 						private_data, private_data_len);
 		else
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
 	mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
-	enum cma_state state;
+	enum rdma_cm_state state;
 	int ret = 0;
 
 	/* Record that we want to remove the device */
-	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-	if (state == CMA_DESTROYING)
+	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+	if (state == RDMA_CM_DESTROYING)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
 	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
-	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
 	memset(&event, 0, sizeof event);
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlmsghdr *nlh;
+	struct rdma_cm_id_stats *id_stats;
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id = NULL;
+	struct cma_device *cma_dev;
+	int i_dev = 0, i_id = 0;
+
+	/*
+	 * We export all of the IDs as a sequence of messages.  Each
+	 * ID gets its own netlink message.
+	 */
+	mutex_lock(&lock);
+
+	list_for_each_entry(cma_dev, &dev_list, list) {
+		if (i_dev < cb->args[0]) {
+			i_dev++;
+			continue;
+		}
+
+		i_id = 0;
+		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+			if (i_id < cb->args[1]) {
+				i_id++;
+				continue;
+			}
+
+			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+						sizeof *id_stats, RDMA_NL_RDMA_CM,
+						RDMA_NL_RDMA_CM_ID_STATS);
+			if (!id_stats)
+				goto out;
+
+			memset(id_stats, 0, sizeof *id_stats);
+			id = &id_priv->id;
+			id_stats->node_type = id->route.addr.dev_addr.dev_type;
+			id_stats->port_num = id->port_num;
+			id_stats->bound_dev_if =
+				id->route.addr.dev_addr.bound_dev_if;
+
+			if (id->route.addr.src_addr.ss_family == AF_INET) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			}
+
+			id_stats->pid		= id_priv->owner;
+			id_stats->port_space	= id->ps;
+			id_stats->cm_state	= id_priv->state;
+			id_stats->qp_num	= id_priv->qp_num;
+			id_stats->qp_type	= id->qp_type;
+
+			i_id++;
+		}
+
+		cb->args[1] = 0;
+		i_dev++;
+	}
+
+out:
+	mutex_unlock(&lock);
+	cb->args[0] = i_dev;
+	cb->args[1] = i_id;
+
+	return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+};
+
 static int __init cma_init(void)
 {
 	int ret;
@@ -3343,6 +3430,10 @@ static int __init cma_init(void)
 	ret = ib_register_client(&cma_client);
 	if (ret)
 		goto err;
+
+	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
 	return 0;
 
 err:
@@ -3355,6 +3446,7 @@ err:
 
 static void __exit cma_cleanup(void)
 {
+	ibnl_remove_client(RDMA_NL_RDMA_CM);
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
 	rdma_addr_unregister_client(&addr_client);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index f793bf2f5da7..4007f721d25d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <rdma/rdma_netlink.h>
 
 #include "core_priv.h"
 
@@ -725,22 +726,40 @@ static int __init ib_core_init(void)
 		return -ENOMEM;
 
 	ret = ib_sysfs_setup();
-	if (ret)
+	if (ret) {
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+		goto err;
+	}
+
+	ret = ibnl_init();
+	if (ret) {
+		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+		goto err_sysfs;
+	}
 
 	ret = ib_cache_setup();
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
-		ib_sysfs_cleanup();
-		destroy_workqueue(ib_wq);
+		goto err_nl;
 	}
 
+	return 0;
+
+err_nl:
+	ibnl_cleanup();
+
+err_sysfs:
+	ib_sysfs_cleanup();
+
+err:
+	destroy_workqueue(ib_wq);
 	return ret;
 }
 
 static void __exit ib_core_cleanup(void)
 {
 	ib_cache_cleanup();
+	ibnl_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
 	destroy_workqueue(ib_wq);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 822cfdcd9f78..b4d8672a3e4e 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		goto error1;
 	}
 
+	/* Verify the QP requested is supported.  For example, Ethernet devices
+	 * will not have QP0 */
+	if (!port_priv->qp_info[qpn].qp) {
+		ret = ERR_PTR(-EPROTONOSUPPORT);
+		goto error1;
+	}
+
 	/* Allocate structures */
 	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 	if (!mad_agent_priv) {
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
new file mode 100644
index 000000000000..4a5abaf0a25c
--- /dev/null
+++ b/drivers/infiniband/core/netlink.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2010 Voltaire Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <rdma/rdma_netlink.h>
+
+struct ibnl_client {
+	struct list_head		list;
+	int				index;
+	int				nops;
+	const struct ibnl_client_cbs   *cb_table;
+};
+
+static DEFINE_MUTEX(ibnl_mutex);
+static struct sock *nls;
+static LIST_HEAD(client_list);
+
+int ibnl_add_client(int index, int nops,
+		    const struct ibnl_client_cbs cb_table[])
+{
+	struct ibnl_client *cur;
+	struct ibnl_client *nl_client;
+
+	nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
+	if (!nl_client)
+		return -ENOMEM;
+
+	nl_client->index	= index;
+	nl_client->nops		= nops;
+	nl_client->cb_table	= cb_table;
+
+	mutex_lock(&ibnl_mutex);
+
+	list_for_each_entry(cur, &client_list, list) {
+		if (cur->index == index) {
+			pr_warn("Client for %d already exists\n", index);
+			mutex_unlock(&ibnl_mutex);
+			kfree(nl_client);
+			return -EINVAL;
+		}
+	}
+
+	list_add_tail(&nl_client->list, &client_list);
+
+	mutex_unlock(&ibnl_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(ibnl_add_client);
+
+int ibnl_remove_client(int index)
+{
+	struct ibnl_client *cur, *next;
+
+	mutex_lock(&ibnl_mutex);
+	list_for_each_entry_safe(cur, next, &client_list, list) {
+		if (cur->index == index) {
+			list_del(&(cur->list));
+			mutex_unlock(&ibnl_mutex);
+			kfree(cur);
+			return 0;
+		}
+	}
+	pr_warn("Can't remove callback for client idx %d. Not found\n", index);
+	mutex_unlock(&ibnl_mutex);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(ibnl_remove_client);
+
+void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
+		   int len, int client, int op)
+{
+	unsigned char *prev_tail;
+
+	prev_tail = skb_tail_pointer(skb);
+	*nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
+			 len, NLM_F_MULTI);
+	(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
+	return NLMSG_DATA(*nlh);
+
+nlmsg_failure:
+	nlmsg_trim(skb, prev_tail);
+	return NULL;
+}
+EXPORT_SYMBOL(ibnl_put_msg);
+
+int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
+		  int len, void *data, int type)
+{
+	unsigned char *prev_tail;
+
+	prev_tail = skb_tail_pointer(skb);
+	NLA_PUT(skb, type, len, data);
+	nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
+	return -EMSGSIZE;
+}
+EXPORT_SYMBOL(ibnl_put_attr);
+
+static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	struct ibnl_client *client;
+	int type = nlh->nlmsg_type;
+	int index = RDMA_NL_GET_CLIENT(type);
+	int op = RDMA_NL_GET_OP(type);
+
+	list_for_each_entry(client, &client_list, list) {
+		if (client->index == index) {
+			if (op < 0 || op >= client->nops ||
+			    !client->cb_table[RDMA_NL_GET_OP(op)].dump)
+				return -EINVAL;
+			return netlink_dump_start(nls, skb, nlh,
+						  client->cb_table[op].dump,
+						  NULL);
+		}
+	}
+
+	pr_info("Index %d wasn't found in client list\n", index);
+	return -EINVAL;
+}
+
+static void ibnl_rcv(struct sk_buff *skb)
+{
+	mutex_lock(&ibnl_mutex);
+	netlink_rcv_skb(skb, &ibnl_rcv_msg);
+	mutex_unlock(&ibnl_mutex);
+}
+
+int __init ibnl_init(void)
+{
+	nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
+				    NULL, THIS_MODULE);
+	if (!nls) {
+		pr_warn("Failed to create netlink socket\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ibnl_cleanup(void)
+{
+	struct ibnl_client *cur, *next;
+
+	mutex_lock(&ibnl_mutex);
+	list_for_each_entry_safe(cur, next, &client_list, list) {
+		list_del(&(cur->list));
+		kfree(cur);
+	}
+	mutex_unlock(&ibnl_mutex);
+
+	netlink_kernel_release(nls);
+}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b3fa798525b2..71be5eebd683 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -367,13 +367,28 @@ done:
 	return ret;
 }
 
-static ssize_t ucma_create_id(struct ucma_file *file,
-			      const char __user *inbuf,
-			      int in_len, int out_len)
+static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
+{
+	switch (cmd->ps) {
+	case RDMA_PS_TCP:
+		*qp_type = IB_QPT_RC;
+		return 0;
+	case RDMA_PS_UDP:
+	case RDMA_PS_IPOIB:
+		*qp_type = IB_QPT_UD;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
+			      int in_len, int out_len)
 {
 	struct rdma_ucm_create_id cmd;
 	struct rdma_ucm_create_id_resp resp;
 	struct ucma_context *ctx;
+	enum ib_qp_type qp_type;
 	int ret;
 
 	if (out_len < sizeof(resp))
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	ret = ucma_get_qp_type(&cmd, &qp_type);
+	if (ret)
+		return ret;
+
 	mutex_lock(&file->mut);
 	ctx = ucma_alloc_ctx(file);
 	mutex_unlock(&file->mut);
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file,
 		return -ENOMEM;
 
 	ctx->uid = cmd.uid;
-	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
+	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
 	if (IS_ERR(ctx->cm_id)) {
 		ret = PTR_ERR(ctx->cm_id);
 		goto err1;
@@ -1338,9 +1357,11 @@ static const struct file_operations ucma_fops = {
 };
 
 static struct miscdevice ucma_misc = {
-	.minor	= MISC_DYNAMIC_MINOR,
-	.name	= "rdma_cm",
-	.fops	= &ucma_fops,
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "rdma_cm",
+	.nodename	= "infiniband/rdma_cm",
+	.mode		= 0666,
+	.fops		= &ucma_fops,
 };
 
 static ssize_t show_abi_version(struct device *dev,
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index cd1996d0ad08..8d261b6ea5fe 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device)
 	kref_put(&umad_dev->ref, ib_umad_release_dev);
 }
 
+static char *umad_devnode(struct device *dev, mode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 static int __init ib_umad_init(void)
 {
 	int ret;
@@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void)
 		goto out_chrdev;
 	}
 
+	umad_class->devnode = umad_devnode;
+
 	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
 	if (ret) {
 		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index ec83e9fe387b..e49a85f8a44d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
 	kfree(uverbs_dev);
 }
 
+static char *uverbs_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = 0666;
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 static int __init ib_uverbs_init(void)
 {
 	int ret;
@@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void)
 		goto out_chrdev;
 	}
 
+	uverbs_class->devnode = uverbs_devnode;
+
 	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
 	if (ret) {
 		printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 239184138994..0a5008fbebac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 		goto err;
 
 	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
-		iwch_post_zb_read(ep->com.qp);
+		iwch_post_zb_read(ep);
 	}
 
 	goto out;
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
+	unsigned long flags;
+	int post_zb = 0;
 
 	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		return CPL_RET_BUF_DONE;
 	}
 
+	spin_lock_irqsave(&ep->com.lock, flags);
 	BUG_ON(credits != 1);
 	dst_confirm(ep->dst);
 	if (!ep->mpa_skb) {
 		PDBG("%s rdma_init wr_ack ep %p state %u\n",
-		     __func__, ep, state_read(&ep->com));
+		     __func__, ep, ep->com.state);
 		if (ep->mpa_attr.initiator) {
 			PDBG("%s initiator ep %p state %u\n",
-			     __func__, ep, state_read(&ep->com));
-			if (peer2peer)
-				iwch_post_zb_read(ep->com.qp);
+			     __func__, ep, ep->com.state);
+			if (peer2peer && ep->com.state == FPDU_MODE)
+				post_zb = 1;
 		} else {
 			PDBG("%s responder ep %p state %u\n",
-			     __func__, ep, state_read(&ep->com));
-			ep->com.rpl_done = 1;
-			wake_up(&ep->com.waitq);
+			     __func__, ep, ep->com.state);
+			if (ep->com.state == MPA_REQ_RCVD) {
+				ep->com.rpl_done = 1;
+				wake_up(&ep->com.waitq);
+			}
 		}
 	} else {
 		PDBG("%s lsm ack ep %p state %u freeing skb\n",
-		     __func__, ep, state_read(&ep->com));
+		     __func__, ep, ep->com.state);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
 	}
+	spin_unlock_irqrestore(&ep->com.lock, flags);
+	if (post_zb)
+		iwch_post_zb_read(ep);
 	return CPL_RET_BUF_DONE;
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index c5406da3f4cd..9a342c9b220d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 			     struct ib_mw_bind *mw_bind);
 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_qp *qhp);
+int iwch_post_zb_read(struct iwch_ep *ep);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
 void stop_read_rep_timer(struct iwch_qp *qhp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1b4cd09f74dc..ecd313f359a4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
 	}
 }
 
-int iwch_post_zb_read(struct iwch_qp *qhp)
+int iwch_post_zb_read(struct iwch_ep *ep)
 {
 	union t3_wr *wqe;
 	struct sk_buff *skb;
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
 	wqe->read.local_len = cpu_to_be32(0);
 	wqe->read.local_to = cpu_to_be64(1);
 	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
-	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
+	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
 						V_FW_RIWR_LEN(flit_cnt));
 	skb->priority = CPL_PRIORITY_DATA;
-	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
+	return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
 }
 
 /*
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 35d2a5dd9bb4..4f045375c8e2 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -35,7 +35,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
-#include <linux/workqueue.h>
+#include <linux/completion.h>
 #include <linux/netdevice.h>
 #include <linux/sched.h>
 #include <linux/pci.h>
@@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 
 #define C4IW_WR_TO (10*HZ)
 
-enum {
-	REPLY_READY = 0,
-};
-
 struct c4iw_wr_wait {
-	wait_queue_head_t wait;
-	unsigned long status;
+	struct completion completion;
 	int ret;
 };
 
 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
 {
 	wr_waitp->ret = 0;
-	wr_waitp->status = 0;
-	init_waitqueue_head(&wr_waitp->wait);
+	init_completion(&wr_waitp->completion);
 }
 
 static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
 {
 	wr_waitp->ret = ret;
-	set_bit(REPLY_READY, &wr_waitp->status);
-	wake_up(&wr_waitp->wait);
+	complete(&wr_waitp->completion);
 }
 
 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 	int ret;
 
 	do {
-		ret = wait_event_timeout(wr_waitp->wait,
-			test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
+		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
 		if (!ret) {
 			printk(KERN_ERR MOD "%s - Device %s not responding - "
 			       "tid %u qpid %u\n", func,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 13de1192927c..2d668c69f6d9 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
 	u32 i = 0;
 	struct nes_device *nesdev;
 
-	strict_strtoul(buf, 0, &wqm_quanta_value);
+	if (kstrtoul(buf, 0, &wqm_quanta_value) < 0)
+		return -EINVAL;
+
 	list_for_each_entry(nesdev, &nes_dev_list, list) {
 		if (i == ee_flsh_adapter) {
 			nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 7c03a70c55a2..8349f9c5064c 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_QIB
 	tristate "QLogic PCIe HCA support"
-	depends on 64BIT && NET
+	depends on 64BIT
 	---help---
 	This is a low-level driver for QLogic PCIe QLE InfiniBand host
 	channel adapters.  This driver does not support the QLogic
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 9876865732f7..ede1475bee09 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn   *ib_conn,
 	iser_conn_get(ib_conn); /* ref ib conn's cma id */
 	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
 					     (void *)ib_conn,
-					     RDMA_PS_TCP);
+					     RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(ib_conn->cma_id)) {
 		err = PTR_ERR(ib_conn->cma_id);
 		iser_err("rdma_create_id failed: %d\n", err);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 376d640487d2..ee165fdcb596 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target,
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
-	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
+	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
 	int res;
 	u8 opcode;
 
@@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 			break;
 		}
 
-		iu = (struct srp_iu *) wc.wr_id;
+		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
 		list_add(&iu->list, &target->free_tx);
 	}
 }
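For completeness, a hedged userspace sketch of consuming the new ID-statistics dump. It assumes NETLINK_RDMA is protocol 20 and reproduces the RDMA_NL_GET_TYPE() client/op encoding and the RDMA_NL_RDMA_CM constants from the kernel headers this series adds; treat those values as assumptions to be checked against <rdma/rdma_netlink.h>, and note that error handling is minimal:

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	#ifndef NETLINK_RDMA
	#define NETLINK_RDMA 20			/* assumed; added by this series */
	#endif
	#define RDMA_NL_RDMA_CM		 1
	#define RDMA_NL_RDMA_CM_ID_STATS 0
	#define RDMA_NL_GET_TYPE(client, op)	(((client) << 10) + (op))

	int main(void)
	{
		struct sockaddr_nl addr   = { .nl_family = AF_NETLINK };
		struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
		struct nlmsghdr req = {
			.nlmsg_len   = NLMSG_LENGTH(0),
			.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
							RDMA_NL_RDMA_CM_ID_STATS),
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
			.nlmsg_seq   = 1,
		};
		char buf[8192];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
		ssize_t n;

		if (fd < 0 || bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
			return 1;
		if (sendto(fd, &req, req.nlmsg_len, 0,
			   (struct sockaddr *) &kernel, sizeof(kernel)) < 0)
			return 1;

		/* each connection ID arrives as its own NLM_F_MULTI message */
		while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *) buf;
			int len = (int) n;

			for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
				if (nlh->nlmsg_type == NLMSG_DONE)
					return 0;
				printf("got %u-byte id_stats message\n",
				       nlh->nlmsg_len);
			}
		}
		return 0;
	}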