Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c             |  16
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c          |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c          |  17
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c          |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c           |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c         | 463
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h      |  31
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c        |  12
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c   |   5
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c     | 103
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h     |   8
11 files changed, 560 insertions(+), 101 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index bbcfa76c2b62..9729639df407 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1384,15 +1384,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return cma_protocol_roce_dev_port(device, port_num);
}
-static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
- const struct net_device *net_dev)
+static bool cma_match_net_dev(const struct rdma_cm_id *id,
+ const struct net_device *net_dev,
+ u8 port_num)
{
- const struct rdma_addr *addr = &id_priv->id.route.addr;
+ const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
/* This request is an AF_IB request or a RoCE request */
- return addr->src_addr.ss_family == AF_IB ||
- cma_protocol_roce(&id_priv->id);
+ return (!id->port_num || id->port_num == port_num) &&
+ (addr->src_addr.ss_family == AF_IB ||
+ cma_protocol_roce_dev_port(id->device, port_num));
return !addr->dev_addr.bound_dev_if ||
(net_eq(dev_net(net_dev), addr->dev_addr.net) &&
@@ -1414,13 +1416,13 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
- cma_match_net_dev(id_priv, net_dev))
+ cma_match_net_dev(&id_priv->id, net_dev, req->port))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
listen_list) {
if (id_priv_dev->id.device == cm_id->device &&
- cma_match_net_dev(id_priv_dev, net_dev))
+ cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
return id_priv_dev;
}
}
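The core of the cma.c change is the extra port guard: a listener bound to a specific port must no longer match a request that arrived on a different port. Distilled into a standalone predicate (a hypothetical helper, not part of the patch):

/* Hypothetical helper isolating the port check added above: a wildcard
 * listener (port_num == 0) matches any port, a bound one only its own. */
static bool listener_port_matches(const struct rdma_cm_id *id, u8 port_num)
{
	return !id->port_num || id->port_num == port_num;
}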
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 704680471567..cd2ff5f9518a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -449,7 +449,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct c4iw_ep *ep = handle;
- printk(KERN_ERR MOD "ARP failure duing connect\n");
+ printk(KERN_ERR MOD "ARP failure during connect\n");
kfree_skb(skb);
connect_reply_upcall(ep, -EHOSTUNREACH);
state_set(&ep->com, DEAD);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 870e56b6b25f..26833bfa639b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -40,6 +40,7 @@
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
enum {
@@ -606,8 +607,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
struct ib_mad *mad)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- int err;
- int slave;
+ int err, other_port;
+ int slave = -1;
u8 *slave_id;
int is_eth = 0;
@@ -625,7 +626,17 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
return -EINVAL;
}
- if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
+ err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+ if (err && mlx4_is_mf_bonded(dev->dev)) {
+ other_port = (port == 1) ? 2 : 1;
+ err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+ if (!err) {
+ pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
+ slave, grh->dgid.raw, port, other_port);
+ port = other_port;
+ }
+ }
+ if (err) {
mlx4_ib_warn(ibdev, "failed matching grh\n");
return -ENOENT;
}
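Under multi-function bonding the RoCE GID may be registered on the peer physical port, so the demux path retries the lookup there. The fallback, distilled into a sketch (the wrapper name is illustrative; the two mlx4 calls are the ones used above):

/* Illustrative wrapper: try the wire port first, then the bonded
 * peer port (1 <-> 2) if the first lookup fails. */
static int find_slave_mf_bonded(struct mlx4_dev *dev, int port,
				u8 *gid, int *slave)
{
	int err = mlx4_get_slave_from_roce_gid(dev, port, gid, slave);

	if (err && mlx4_is_mf_bonded(dev))
		err = mlx4_get_slave_from_roce_gid(dev, port == 1 ? 2 : 1,
						   gid, slave);
	return err;
}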
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 68d5a5fda271..0597f3eef5d0 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -287,7 +287,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem);
} else {
- kfree(msrq->wrid);
+ kvfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf);
mlx4_db_free(dev->dev, &msrq->db);
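kvfree() is required when a buffer may have come from either kmalloc() or vmalloc(); calling plain kfree() on vmalloc'ed memory corrupts the allocator. A minimal sketch of the allocation pattern that motivates the fix (the helper is hypothetical; the driver presumably falls back to vmalloc for large WRID arrays):

/* Hypothetical allocator: small arrays from kmalloc, large ones from
 * vmalloc.  Only kvfree() can safely release the result in both cases. */
static u64 *alloc_wrid(int max_wr)
{
	u64 *wrid = kmalloc_array(max_wr, sizeof(u64),
				  GFP_KERNEL | __GFP_NOWARN);

	if (!wrid)
		wrid = vmalloc(max_wr * sizeof(u64));
	return wrid;
}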
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 7ddc790b1819..fd1de31e0611 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -770,7 +770,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int uninitialized_var(index);
int uninitialized_var(inlen);
int cqe_size;
- int irqn;
+ unsigned int irqn;
int eqn;
int err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a55bf05c8522..ec737e2287fe 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -45,6 +45,9 @@
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/fs.h>
#include "user.h"
#include "mlx5_ib.h"
@@ -1153,6 +1156,457 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
return 0;
}
+static bool outer_header_zero(u32 *match_criteria)
+{
+ int size = MLX5_ST_SZ_BYTES(fte_match_param);
+ char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+
+ return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
+ outer_headers_c + 1,
+ size - 1);
+}
+
+static int parse_flow_attr(u32 *match_c, u32 *match_v,
+ union ib_flow_spec *ib_spec)
+{
+ void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ switch (ib_spec->type) {
+ case IB_FLOW_SPEC_ETH:
+ if (ib_spec->size != sizeof(ib_spec->eth))
+ return -EINVAL;
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dmac_47_16),
+ ib_spec->eth.mask.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dmac_47_16),
+ ib_spec->eth.val.dst_mac);
+
+ if (ib_spec->eth.mask.vlan_tag) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ vlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ first_vid, ntohs(ib_spec->eth.val.vlan_tag));
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ first_cfi,
+ ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ first_cfi,
+ ntohs(ib_spec->eth.val.vlan_tag) >> 12);
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ first_prio,
+ ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ first_prio,
+ ntohs(ib_spec->eth.val.vlan_tag) >> 13);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, ntohs(ib_spec->eth.mask.ether_type));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ntohs(ib_spec->eth.val.ether_type));
+ break;
+ case IB_FLOW_SPEC_IPV4:
+ if (ib_spec->size != sizeof(ib_spec->ipv4))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ETH_P_IP);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.mask.src_ip,
+ sizeof(ib_spec->ipv4.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.val.src_ip,
+ sizeof(ib_spec->ipv4.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.mask.dst_ip,
+ sizeof(ib_spec->ipv4.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.val.dst_ip,
+ sizeof(ib_spec->ipv4.val.dst_ip));
+ break;
+ case IB_FLOW_SPEC_TCP:
+ if (ib_spec->size != sizeof(ib_spec->tcp_udp))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_TCP);
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+ ntohs(ib_spec->tcp_udp.val.src_port));
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+ ntohs(ib_spec->tcp_udp.mask.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+ ntohs(ib_spec->tcp_udp.val.dst_port));
+ break;
+ case IB_FLOW_SPEC_UDP:
+ if (ib_spec->size != sizeof(ib_spec->tcp_udp))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_UDP);
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+ ntohs(ib_spec->tcp_udp.val.src_port));
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+ ntohs(ib_spec->tcp_udp.mask.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+ ntohs(ib_spec->tcp_udp.val.dst_port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* A flow that could catch both multicast and unicast packets must not
+ * be placed in the multicast flow steering table, where it could steal
+ * multicast packets that belong to other rules.
+ */
+static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
+{
+ struct ib_flow_spec_eth *eth_spec;
+
+ if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
+ ib_attr->size < sizeof(struct ib_flow_attr) +
+ sizeof(struct ib_flow_spec_eth) ||
+ ib_attr->num_of_specs < 1)
+ return false;
+
+ eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
+ if (eth_spec->type != IB_FLOW_SPEC_ETH ||
+ eth_spec->size != sizeof(*eth_spec))
+ return false;
+
+ return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
+ is_multicast_ether_addr(eth_spec->val.dst_mac);
+}
+
+static bool is_valid_attr(struct ib_flow_attr *flow_attr)
+{
+ union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+ bool has_ipv4_spec = false;
+ bool eth_type_ipv4 = true;
+ unsigned int spec_index;
+
+ /* Validate that ethertype is correct */
+ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
+ if (ib_spec->type == IB_FLOW_SPEC_ETH &&
+ ib_spec->eth.mask.ether_type) {
+ if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
+ ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
+ eth_type_ipv4 = false;
+ } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
+ has_ipv4_spec = true;
+ }
+ ib_spec = (void *)ib_spec + ib_spec->size;
+ }
+ return !has_ipv4_spec || eth_type_ipv4;
+}
+
+static void put_flow_table(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *prio, bool ft_added)
+{
+ prio->refcount -= !!ft_added;
+ if (!prio->refcount) {
+ mlx5_destroy_flow_table(prio->flow_table);
+ prio->flow_table = NULL;
+ }
+}
+
+static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
+{
+ struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
+ struct mlx5_ib_flow_handler *handler = container_of(flow_id,
+ struct mlx5_ib_flow_handler,
+ ibflow);
+ struct mlx5_ib_flow_handler *iter, *tmp;
+
+ mutex_lock(&dev->flow_db.lock);
+
+ list_for_each_entry_safe(iter, tmp, &handler->list, list) {
+ mlx5_del_flow_rule(iter->rule);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+
+ mlx5_del_flow_rule(handler->rule);
+ put_flow_table(dev, &dev->flow_db.prios[handler->prio], true);
+ mutex_unlock(&dev->flow_db.lock);
+
+ kfree(handler);
+
+ return 0;
+}
+
+#define MLX5_FS_MAX_TYPES 10
+#define MLX5_FS_MAX_ENTRIES 32000UL
+static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
+ struct ib_flow_attr *flow_attr)
+{
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_table *ft;
+ int num_entries;
+ int num_groups;
+ int priority;
+ int err = 0;
+
+ if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ if (flow_is_multicast_only(flow_attr))
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = flow_attr->priority;
+ ns = mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS);
+ num_entries = MLX5_FS_MAX_ENTRIES;
+ num_groups = MLX5_FS_MAX_TYPES;
+ prio = &dev->flow_db.prios[priority];
+ } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
+ ns = mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_LEFTOVERS);
+ build_leftovers_ft_param(&priority,
+ &num_entries,
+ &num_groups);
+ prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+ }
+
+ if (!ns)
+ return ERR_PTR(-ENOTSUPP);
+
+ ft = prio->flow_table;
+ if (!ft) {
+ ft = mlx5_create_auto_grouped_flow_table(ns, priority,
+ num_entries,
+ num_groups);
+
+ if (!IS_ERR(ft)) {
+ prio->refcount = 0;
+ prio->flow_table = ft;
+ } else {
+ err = PTR_ERR(ft);
+ }
+ }
+
+ return err ? ERR_PTR(err) : prio;
+}
+
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ struct mlx5_ib_flow_handler *handler;
+ void *ib_flow = flow_attr + 1;
+ u8 match_criteria_enable = 0;
+ unsigned int spec_index;
+ u32 *match_c;
+ u32 *match_v;
+ int err = 0;
+
+ if (!is_valid_attr(flow_attr))
+ return ERR_PTR(-EINVAL);
+
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !match_c || !match_v) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
+ err = parse_flow_attr(match_c, match_v, ib_flow);
+ if (err < 0)
+ goto free;
+
+ ib_flow += ((union ib_flow_spec *)ib_flow)->size;
+ }
+
+ /* Outer header support only */
+ match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+ handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
+ match_c, match_v,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ dst);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ handler->prio = ft_prio - dev->flow_db.prios;
+
+ ft_prio->flow_table = ft;
+free:
+ if (err)
+ kfree(handler);
+ kfree(match_c);
+ kfree(match_v);
+ return err ? ERR_PTR(err) : handler;
+}
+
+enum {
+ LEFTOVERS_MC,
+ LEFTOVERS_UC,
+};
+
+static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_ucast = NULL;
+ struct mlx5_ib_flow_handler *handler = NULL;
+
+ static struct {
+ struct ib_flow_attr flow_attr;
+ struct ib_flow_spec_eth eth_flow;
+ } leftovers_specs[] = {
+ [LEFTOVERS_MC] = {
+ .flow_attr = {
+ .num_of_specs = 1,
+ .size = sizeof(leftovers_specs[0])
+ },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = {.dst_mac = {0x1} },
+ .val = {.dst_mac = {0x1} }
+ }
+ },
+ [LEFTOVERS_UC] = {
+ .flow_attr = {
+ .num_of_specs = 1,
+ .size = sizeof(leftovers_specs[0])
+ },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = {.dst_mac = {0x1} },
+ .val = {.dst_mac = {} }
+ }
+ }
+ };
+
+ handler = create_flow_rule(dev, ft_prio,
+ &leftovers_specs[LEFTOVERS_MC].flow_attr,
+ dst);
+ if (!IS_ERR(handler) &&
+ flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
+ handler_ucast = create_flow_rule(dev, ft_prio,
+ &leftovers_specs[LEFTOVERS_UC].flow_attr,
+ dst);
+ if (IS_ERR(handler_ucast)) {
+ kfree(handler);
+ handler = handler_ucast;
+ } else {
+ list_add(&handler_ucast->list, &handler->list);
+ }
+ }
+
+ return handler;
+}
+
+static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr,
+ int domain)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_flow_handler *handler = NULL;
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_ib_flow_prio *ft_prio;
+ int err;
+
+ if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
+ return ERR_PTR(-ENOSPC);
+
+ if (domain != IB_FLOW_DOMAIN_USER ||
+ flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
+ flow_attr->flags)
+ return ERR_PTR(-EINVAL);
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&dev->flow_db.lock);
+
+ ft_prio = get_flow_table(dev, flow_attr);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+
+ if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ handler = create_flow_rule(dev, ft_prio, flow_attr,
+ dst);
+ } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
+ handler = create_leftovers_rule(dev, ft_prio, flow_attr,
+ dst);
+ } else {
+ err = -EINVAL;
+ goto destroy_ft;
+ }
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ handler = NULL;
+ goto destroy_ft;
+ }
+
+ ft_prio->refcount++;
+ mutex_unlock(&dev->flow_db.lock);
+ kfree(dst);
+
+ return &handler->ibflow;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+unlock:
+ mutex_unlock(&dev->flow_db.lock);
+ kfree(dst);
+ kfree(handler);
+ return ERR_PTR(err);
+}
+
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -1819,10 +2273,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
}
+ if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ dev->ib_dev.create_flow = mlx5_ib_create_flow;
+ dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
+ dev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+ }
err = init_node_data(dev);
if (err)
goto err_dealloc;
+ mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
if (ll == IB_LINK_LAYER_ETHERNET) {
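For context, the new create_flow/destroy_flow hooks are what user space reaches through libibverbs when attaching a steering rule to a QP. A hedged sketch of such a caller (struct and constant names follow libibverbs' flow API; exact availability depends on the library version):

/* User-space sketch: steer frames for one destination MAC to this QP. */
struct {
	struct ibv_flow_attr     attr;
	struct ibv_flow_spec_eth eth;
} flow = {
	.attr = {
		.type         = IBV_FLOW_ATTR_NORMAL,
		.size         = sizeof(flow),
		.num_of_specs = 1,
		.port         = 1,
	},
	.eth = {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(struct ibv_flow_spec_eth),
		.val.dst_mac  = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
		.mask.dst_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
	},
};
struct ibv_flow *f = ibv_create_flow(qp, &flow.attr);
/* ... later: ibv_destroy_flow(f); */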
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d475f83c295b..d2b9737baa36 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -125,6 +125,36 @@ struct mlx5_ib_pd {
u32 pdn;
};
+#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
+#define MLX5_IB_FLOW_LAST_PRIO (MLX5_IB_FLOW_MCAST_PRIO - 1)
+#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
+#error "Invalid number of bypass priorities"
+#endif
+#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
+
+#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+struct mlx5_ib_flow_prio {
+ struct mlx5_flow_table *flow_table;
+ unsigned int refcount;
+};
+
+struct mlx5_ib_flow_handler {
+ struct list_head list;
+ struct ib_flow ibflow;
+ unsigned int prio;
+ struct mlx5_flow_rule *rule;
+};
+
+struct mlx5_ib_flow_db {
+ struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ /* Protect flow steering bypass flow tables
+ * when adding or removing flow rules; only a single
+ * add/removal of a flow steering rule can be done
+ * simultaneously.
+ */
+ struct mutex lock;
+};
+
/* Use macros here so that don't have to duplicate
* enum ib_send_flags and enum ib_qp_type for low-level driver
*/
@@ -500,6 +530,7 @@ struct mlx5_ib_dev {
*/
struct srcu_struct mr_srcu;
#endif
+ struct mlx5_ib_flow_db flow_db;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 13ef22bd9459..fcdf37913a26 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -89,14 +89,14 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- mutex_lock(&d_inode(parent)->i_mutex);
+ inode_lock(d_inode(parent));
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
error = qibfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
- mutex_unlock(&d_inode(parent)->i_mutex);
+ inode_unlock(d_inode(parent));
return error;
}
@@ -481,7 +481,7 @@ static int remove_device_files(struct super_block *sb,
int ret, i;
root = dget(sb->s_root);
- mutex_lock(&d_inode(root)->i_mutex);
+ inode_lock(d_inode(root));
snprintf(unit, sizeof(unit), "%u", dd->unit);
dir = lookup_one_len(unit, root, strlen(unit));
@@ -491,7 +491,7 @@ static int remove_device_files(struct super_block *sb,
goto bail;
}
- mutex_lock(&d_inode(dir)->i_mutex);
+ inode_lock(d_inode(dir));
remove_file(dir, "counters");
remove_file(dir, "counter_names");
remove_file(dir, "portcounter_names");
@@ -506,13 +506,13 @@ static int remove_device_files(struct super_block *sb,
}
}
remove_file(dir, "flash");
- mutex_unlock(&d_inode(dir)->i_mutex);
+ inode_unlock(d_inode(dir));
ret = simple_rmdir(d_inode(root), dir);
d_delete(dir);
dput(dir);
bail:
- mutex_unlock(&d_inode(root)->i_mutex);
+ inode_unlock(d_inode(root));
dput(root);
return ret;
}
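inode_lock()/inode_unlock() are thin wrappers added to <linux/fs.h> so filesystems stop touching i_mutex directly (the lock later became an rwsem behind the same interface). At the time of this change the wrappers were, roughly:

/* Sketch of the wrappers this hunk switches to; based on the
 * <linux/fs.h> of that era, later kernels differ. */
static inline void inode_lock(struct inode *inode)
{
	mutex_lock(&inode->i_mutex);
}

static inline void inode_unlock(struct inode *inode)
{
	mutex_unlock(&inode->i_mutex);
}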
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index abb3124f89bb..f121e6129339 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -346,7 +346,7 @@ isert_create_device_ib_res(struct isert_device *device)
ret = isert_alloc_comps(device);
if (ret)
- return ret;
+ goto out;
device->pd = ib_alloc_pd(ib_dev);
if (IS_ERR(device->pd)) {
@@ -364,6 +364,9 @@ isert_create_device_ib_res(struct isert_device *device)
out_cq:
isert_free_comps(device);
+out:
+ if (ret > 0)
+ ret = -EINVAL;
return ret;
}
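Besides routing the early failure through cleanup, the new out label normalizes any stray positive return, since callers expect 0 or a negative errno. The convention, in isolation (a sketch, not code from the patch):

/* Sketch of the errno convention enforced at the new "out" label:
 * a positive value leaking from a helper is coerced to a real error. */
static int normalize_ret(int ret)
{
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}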
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2b5e0023dabf..0c37fee363b1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2223,31 +2223,6 @@ static void srpt_release_channel_work(struct work_struct *w)
kfree(ch);
}
-static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
- u8 i_port_id[16])
-{
- struct srpt_node_acl *nacl;
-
- list_for_each_entry(nacl, &sport->port_acl_list, list)
- if (memcmp(nacl->i_port_id, i_port_id,
- sizeof(nacl->i_port_id)) == 0)
- return nacl;
-
- return NULL;
-}
-
-static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
- u8 i_port_id[16])
-{
- struct srpt_node_acl *nacl;
-
- spin_lock_irq(&sport->port_acl_lock);
- nacl = __srpt_lookup_acl(sport, i_port_id);
- spin_unlock_irq(&sport->port_acl_lock);
-
- return nacl;
-}
-
/**
* srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
*
@@ -2265,10 +2240,10 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srp_login_rej *rej;
struct ib_cm_rep_param *rep_param;
struct srpt_rdma_ch *ch, *tmp_ch;
- struct srpt_node_acl *nacl;
+ struct se_node_acl *se_acl;
u32 it_iu_len;
- int i;
- int ret = 0;
+ int i, ret = 0;
+ unsigned char *p;
WARN_ON_ONCE(irqs_disabled());
@@ -2418,33 +2393,47 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
}
+
/*
- * Use the initator port identifier as the session name.
+ * Use the initiator port identifier as the session name; when it is
+ * checked against se_node_acl->initiatorname[], the name can appear
+ * with or without a preceding '0x'.
*/
snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
be64_to_cpu(*(__be64 *)ch->i_port_id),
be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
+ p = &ch->sess_name[0];
- nacl = srpt_lookup_acl(sport, ch->i_port_id);
- if (!nacl) {
- pr_info("Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", ch->sess_name);
+ ch->sess = transport_init_session(TARGET_PROT_NORMAL);
+ if (IS_ERR(ch->sess)) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_debug("Failed to create session\n");
goto destroy_ib;
}
- ch->sess = transport_init_session(TARGET_PROT_NORMAL);
- if (IS_ERR(ch->sess)) {
+try_again:
+ se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
+ if (!se_acl) {
+ pr_info("Rejected login because no ACL has been"
+ " configured yet for initiator %s.\n", ch->sess_name);
+ /*
+ * XXX: Hack to retry the ch->i_port_id lookup without the leading '0x'
+ */
+ if (p == &ch->sess_name[0]) {
+ p += 2;
+ goto try_again;
+ }
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_debug("Failed to create session\n");
- goto deregister_session;
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ transport_free_session(ch->sess);
+ goto destroy_ib;
}
- ch->sess->se_node_acl = &nacl->nacl;
- transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+ ch->sess->se_node_acl = se_acl;
+
+ transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
ch->sess_name, ch->cm_id);
@@ -2488,8 +2477,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
release_channel:
srpt_set_ch_state(ch, CH_RELEASING);
transport_deregister_session_configfs(ch->sess);
-
-deregister_session:
transport_deregister_session(ch->sess);
ch->sess = NULL;
@@ -3092,8 +3079,6 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
INIT_WORK(&sport->work, srpt_refresh_port_work);
- INIT_LIST_HEAD(&sport->port_acl_list);
- spin_lock_init(&sport->port_acl_lock);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
@@ -3327,42 +3312,15 @@ out:
*/
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
- struct srpt_port *sport =
- container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1);
- struct srpt_node_acl *nacl =
- container_of(se_nacl, struct srpt_node_acl, nacl);
u8 i_port_id[16];
if (srpt_parse_i_port_id(i_port_id, name) < 0) {
pr_err("invalid initiator port ID %s\n", name);
return -EINVAL;
}
-
- memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
- nacl->sport = sport;
-
- spin_lock_irq(&sport->port_acl_lock);
- list_add_tail(&nacl->list, &sport->port_acl_list);
- spin_unlock_irq(&sport->port_acl_lock);
-
return 0;
}
-/*
- * configfs callback function invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
- */
-static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl)
-{
- struct srpt_node_acl *nacl =
- container_of(se_nacl, struct srpt_node_acl, nacl);
- struct srpt_port *sport = nacl->sport;
-
- spin_lock_irq(&sport->port_acl_lock);
- list_del(&nacl->list);
- spin_unlock_irq(&sport->port_acl_lock);
-}
-
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
char *page)
{
@@ -3639,7 +3597,6 @@ static const struct target_core_fabric_ops srpt_template = {
.fabric_make_tpg = srpt_make_tpg,
.fabric_drop_tpg = srpt_drop_tpg,
.fabric_init_nodeacl = srpt_init_nodeacl,
- .fabric_cleanup_nodeacl = srpt_cleanup_nodeacl,
.tfc_wwn_attrs = srpt_wwn_attrs,
.tfc_tpg_base_attrs = srpt_tpg_attrs,
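The driver now delegates ACL bookkeeping to the target core: core_tpg_get_initiator_node_acl() replaces the private per-port list, and the login path retries the session name without its "0x" prefix so both spellings of the initiator port ID match. Distilled into a sketch (lookup_acl is illustrative, not a function in the patch):

/* Illustrative helper: look up the initiator ACL by session name,
 * accepting the name with or without a leading "0x". */
static struct se_node_acl *lookup_acl(struct se_portal_group *tpg,
				      unsigned char *name)
{
	struct se_node_acl *acl = core_tpg_get_initiator_node_acl(tpg, name);

	if (!acl && name[0] == '0' && name[1] == 'x')
		acl = core_tpg_get_initiator_node_acl(tpg, name + 2);
	return acl;
}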
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 247c80712357..09037f2b0b51 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -326,11 +326,9 @@ struct srpt_port {
u16 sm_lid;
u16 lid;
union ib_gid gid;
- spinlock_t port_acl_lock;
struct work_struct work;
struct se_portal_group port_tpg_1;
struct se_wwn port_wwn;
- struct list_head port_acl_list;
struct srpt_port_attrib port_attrib;
};
@@ -368,15 +366,9 @@ struct srpt_device {
/**
* struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
* @nacl: Target core node ACL information.
- * @i_port_id: 128-bit SRP initiator port ID.
- * @sport: port information.
- * @list: Element of the per-HCA ACL list.
*/
struct srpt_node_acl {
struct se_node_acl nacl;
- u8 i_port_id[16];
- struct srpt_port *sport;
- struct list_head list;
};
#endif /* IB_SRPT_H */