path: root/drivers/net/ethernet/mellanox/mlx4/eq.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2012-10-02 17:20:40 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-10-02 17:20:40 -0700
commit    7a9a2970b5c1c2ce73d4bb84edaa7ebf13e0c841 (patch)
tree      bd4909abfcd759b376cfd2fab06281df366f6a0f /drivers/net/ethernet/mellanox/mlx4/eq.c
parent    fc47912d9cda50ae6bd9ca30e97e8c03de5b7b60 (diff)
parent    d172f5a4ab151a952a0d898ba3b0ff6a020171a6 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "First batch of InfiniBand/RDMA changes for the 3.7 merge window:

  - mlx4 IB support for SR-IOV
  - A couple of SRP initiator fixes
  - Batch of nes hardware driver fixes
  - Fix for long-standing use-after-free crash in IPoIB
  - Other miscellaneous fixes"

This merge also removes a new use of __cancel_delayed_work(), and
replaces it with the regular cancel_delayed_work() that is now irq-safe
thanks to the workqueue updates.

That said, I suspect the sequence in question should probably use
mod_delayed_work(). I just did the minimal "don't use deprecated
functions" fixup, though.

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (45 commits)
  IB/qib: Fix local access validation for user MRs
  mlx4_core: Disable SENSE_PORT for multifunction devices
  mlx4_core: Clean up enabling of SENSE_PORT for older (ConnectX-1/-2) HCAs
  mlx4_core: Stash PCI ID driver_data in mlx4_priv structure
  IB/srp: Avoid having aborted requests hang
  IB/srp: Fix use-after-free in srp_reset_req()
  IB/qib: Add a qib driver version
  RDMA/nes: Fix compilation error when nes_debug is enabled
  RDMA/nes: Print hardware resource type
  RDMA/nes: Fix for crash when TX checksum offload is off
  RDMA/nes: Cosmetic changes
  RDMA/nes: Fix for incorrect MSS when TSO is on
  RDMA/nes: Fix incorrect resolving of the loopback MAC address
  mlx4_core: Fix crash on uninitialized priv->cmd.slave_sem
  mlx4_core: Trivial cleanups to driver log messages
  mlx4_core: Trivial readability fix: "0X30" -> "0x30"
  IB/mlx4: Create paravirt contexts for VFs when master IB driver initializes
  mlx4: Modify proxy/tunnel QP mechanism so that guests do no calculations
  mlx4: Paravirtualize Node Guids for slaves
  mlx4: Activate SR-IOV mode for IB
  ...
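Linus's aside about mod_delayed_work() is worth unpacking. Below is a minimal C sketch (not the IPoIB code in question) of the two patterns against the 3.7 workqueue API: the cancel-then-requeue sequence the merge keeps, and the single mod_delayed_work() call he suspects it should use. The names poll_wq, poll_work, and rearm_poll_* are hypothetical, for illustration only.

/*
 * Minimal sketch, assuming a periodic-poll use case; poll_wq, poll_work
 * and the rearm_poll_* helpers are hypothetical names.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *poll_wq;	/* hypothetical workqueue */
static struct delayed_work poll_work;		/* hypothetical work item */

static void rearm_poll_minimal(unsigned long delay)
{
	/* The minimal fixup: cancel_delayed_work() is irq-safe as of the
	 * 3.7 workqueue updates, so the deprecated __cancel_delayed_work()
	 * can simply be replaced, then the work queued again. */
	cancel_delayed_work(&poll_work);
	queue_delayed_work(poll_wq, &poll_work, delay);
}

static void rearm_poll_suggested(unsigned long delay)
{
	/* What Linus suspects the sequence should use: one call that
	 * atomically adjusts the pending work's timer. */
	mod_delayed_work(poll_wq, &poll_work, delay);
}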
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/eq.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c  245
1 file changed, 227 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 99a04648fab0..51c764901ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -164,13 +164,16 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
- struct mlx4_eqe *s_eqe =
- &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+ struct mlx4_eqe *s_eqe;
+ unsigned long flags;
+ spin_lock_irqsave(&slave_eq->event_lock, flags);
+ s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
"No free EQE on slave events queue\n", slave);
+ spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
@@ -183,6 +186,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_event_work);
+ spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
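The hunk above serializes the slave event queue's producer path with spin_lock_irqsave()/spin_unlock_irqrestore(), taking care to unlock on the early-return ("no free EQE") path as well. A minimal kernel-style sketch of that pattern follows; demo_event_q and demo_post() are hypothetical stand-ins for the driver's structures, not mlx4 code.

/* Sketch of an irq-safe producer: one lock guards the producer index and
 * the slots, and every exit path releases it. */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_event_q {
	spinlock_t lock;	/* protects prod and slot[]; spin_lock_init() at setup */
	u32 prod;
	u32 slot[16];
};

static bool demo_post(struct demo_event_q *q, u32 val)
{
	unsigned long flags;
	bool posted = false;

	spin_lock_irqsave(&q->lock, flags);	/* usable from any context */
	if (q->prod < ARRAY_SIZE(q->slot)) {
		q->slot[q->prod++] = val;
		posted = true;
	}
	/* a single unlock covers both the success and the "full" path */
	spin_unlock_irqrestore(&q->lock, flags);
	return posted;
}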
@@ -200,6 +204,196 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+ struct mlx4_eqe eqe;
+
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active)
+ return 0;
+
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+ eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
+ eqe.event.port_mgmt_change.port = port;
+
+ return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
+
+int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+ struct mlx4_eqe eqe;
+
+ /* don't send if we don't have that slave */
+ if (dev->num_vfs < slave)
+ return 0;
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+ eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
+ eqe.event.port_mgmt_change.port = port;
+
+ return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
+
+int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
+ u8 port_subtype_change)
+{
+ struct mlx4_eqe eqe;
+
+ /* don't send if we don't have that slave */
+ if (dev->num_vfs < slave)
+ return 0;
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
+ eqe.subtype = port_subtype_change;
+ eqe.event.port_change.port = cpu_to_be32(port << 28);
+
+ mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
+ port_subtype_change, slave, port);
+ return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
+
+enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+ if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+ pr_err("%s: Error: asking for slave:%d, port:%d\n",
+ __func__, slave, port);
+ return SLAVE_PORT_DOWN;
+ }
+ return s_state[slave].port_state[port];
+}
+EXPORT_SYMBOL(mlx4_get_slave_port_state);
+
+static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
+ enum slave_port_state state)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+
+ if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ pr_err("%s: Error: asking for slave:%d, port:%d\n",
+ __func__, slave, port);
+ return -1;
+ }
+ s_state[slave].port_state[port] = state;
+
+ return 0;
+}
+
+static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
+{
+ int i;
+ enum slave_port_gen_event gen_event;
+
+ for (i = 0; i < dev->num_slaves; i++)
+ set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+}
+/**************************************************************************
+ The function receives as input the new event for that port,
+ and changes the slave's port state according to its previous state.
+ The events are:
+ MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+ MLX4_PORT_STATE_DEV_EVENT_PORT_UP
+ MLX4_PORT_STATE_IB_EVENT_GID_VALID
+ MLX4_PORT_STATE_IB_EVENT_GID_INVALID
+***************************************************************************/
+int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
+ u8 port, int event,
+ enum slave_port_gen_event *gen_event)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *ctx = NULL;
+ unsigned long flags;
+ int ret = -1;
+ enum slave_port_state cur_state =
+ mlx4_get_slave_port_state(dev, slave, port);
+
+ *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
+
+ if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ pr_err("%s: Error: asking for slave:%d, port:%d\n",
+ __func__, slave, port);
+ return ret;
+ }
+
+ ctx = &priv->mfunc.master.slave_state[slave];
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
+ __func__, slave, cur_state, event);
+
+ switch (cur_state) {
+ case SLAVE_PORT_DOWN:
+ if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PENDING_UP);
+ break;
+ case SLAVE_PENDING_UP:
+ if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PORT_DOWN);
+ else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PORT_UP);
+ *gen_event = SLAVE_PORT_GEN_EVENT_UP;
+ }
+ break;
+ case SLAVE_PORT_UP:
+ if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PORT_DOWN);
+ *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+ } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
+ event) {
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PENDING_UP);
+ *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+ }
+ break;
+ default:
+ pr_err("%s: BUG!!! UNKNOWN state: "
+ "slave:%d, port:%d\n", __func__, slave, port);
+ goto out;
+ }
+ ret = mlx4_get_slave_port_state(dev, slave, port);
+ mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
+ " :%d gen_event: %d\n",
+ __func__, slave, cur_state, event, *gen_event);
+
+out:
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL(set_and_calc_slave_port_state);
+
+int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
+{
+ struct mlx4_eqe eqe;
+
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+ eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
+ eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.params.port_info.changed_attr =
+ cpu_to_be32((u32) attr);
+
+ slave_event(dev, ALL_SLAVES, &eqe);
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
+
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
struct mlx4_mfunc_master_ctx *master =
@@ -251,6 +445,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
u32 flr_slave;
u8 update_slave_state;
int i;
+ enum slave_port_gen_event gen_event;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -347,35 +542,49 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev,
- MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
- if (mlx4_is_master(dev))
- /*change the state of all slave's port
- * to down:*/
- for (i = 0; i < dev->num_slaves; i++) {
- mlx4_dbg(dev, "%s: Sending "
- "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ if (!mlx4_is_master(dev))
+ break;
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
+ if (i == mlx4_master_func_num(dev))
+ continue;
+ mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
" to slave: %d, port:%d\n",
__func__, i, port);
- if (i == dev->caps.function)
- continue;
mlx4_slave_event(dev, i, eqe);
+ } else { /* IB port */
+ set_and_calc_slave_port_state(dev, i, port,
+ MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+ &gen_event);
+ /* we may be in pending state; in that case do not send a port_down event */
+ if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
+ if (i == mlx4_master_func_num(dev))
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
}
+ }
} else {
- mlx4_dispatch_event(dev,
- MLX4_DEV_EVENT_PORT_UP,
- port);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
+
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
- if (mlx4_is_master(dev)) {
+ if (!mlx4_is_master(dev))
+ break;
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
for (i = 0; i < dev->num_slaves; i++) {
- if (i == dev->caps.function)
+ if (i == mlx4_master_func_num(dev))
continue;
mlx4_slave_event(dev, i, eqe);
}
- }
+ else /* IB port */
+ /* port-up event will be sent to a slave when the
+ * slave's alias-guid is set. This is done in alias_GUID.c
+ */
+ set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
}
break;
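For readers tracking the new IB logic: the following standalone C sketch condenses the slave port state machine that set_and_calc_slave_port_state() implements above. The enum and function names here (step, SP_*, EV_*, GEN_*) are invented stand-ins for the kernel's slave_port_state, event, and slave_port_gen_event values; it mirrors the transitions in the diff but is not the kernel code itself.

#include <stdio.h>

enum sp_state { SP_DOWN, SP_PENDING_UP, SP_UP };
enum sp_event { EV_PORT_DOWN, EV_PORT_UP, EV_GID_VALID, EV_GID_INVALID };
enum sp_gen   { GEN_NONE, GEN_UP, GEN_DOWN };

static enum sp_state step(enum sp_state cur, enum sp_event ev,
			  enum sp_gen *gen)
{
	*gen = GEN_NONE;
	switch (cur) {
	case SP_DOWN:
		/* A physical port-up only arms the slave; the GID must
		 * still become valid before the slave sees the event. */
		if (ev == EV_PORT_UP)
			return SP_PENDING_UP;
		break;
	case SP_PENDING_UP:
		if (ev == EV_PORT_DOWN)
			return SP_DOWN;
		if (ev == EV_GID_VALID) {
			*gen = GEN_UP;		/* now report port-up */
			return SP_UP;
		}
		break;
	case SP_UP:
		if (ev == EV_PORT_DOWN) {
			*gen = GEN_DOWN;
			return SP_DOWN;
		}
		if (ev == EV_GID_INVALID) {
			*gen = GEN_DOWN;	/* fall back to pending */
			return SP_PENDING_UP;
		}
		break;
	}
	return cur;	/* unhandled event: state unchanged */
}

int main(void)
{
	enum sp_gen gen;
	enum sp_state s = SP_DOWN;

	s = step(s, EV_PORT_UP, &gen);	 /* -> SP_PENDING_UP, GEN_NONE */
	s = step(s, EV_GID_VALID, &gen); /* -> SP_UP, GEN_UP */
	printf("state=%d gen=%d\n", s, gen);
	return 0;
}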
OpenPOWER on IntegriCloud