author		Roland Dreier <rolandd@cisco.com>	2007-06-18 09:23:47 -0700
committer	Roland Dreier <rolandd@cisco.com>	2007-06-18 09:23:47 -0700
commit		e61ef2416b0b92828512b6cfcd0104a02b6431fe
tree		51d3307aa5be5591f5859f96a3bd1dd20231b9b0 /drivers/infiniband
parent		5ae2a7a836be660ff1621cce1c46930f19200589
IB/mlx4: Make sure inline data segments don't cross a 64 byte boundary
Inline data segments in send WQEs are not allowed to cross a 64 byte
boundary.  We use inline data segments to hold the UD headers for MLX
QPs (QP0 and QP1).  A send with GRH on QP1 will have a UD header that
is too big to fit in a single inline data segment without crossing a
64 byte boundary, so split the header into two inline data segments.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
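For concreteness, a quick size sketch.  The constants here are
assumptions drawn from the mlx4 driver of this era, not stated in the
commit itself: MLX4_IB_UD_HEADER_SIZE is 72 bytes, MLX4_INLINE_ALIGN
is 64, an inline segment header is one 4-byte byte_count word, and the
first inline segment follows the 16-byte control/MLX segment (as the
barrier comment in the patch below notes).

	/* Hypothetical arithmetic only -- constants assumed as noted above. */
	int header = 8 + 40 + 12 + 8 + 4; /* LRH + GRH + BTH + DETH + imm = 72 */
	int start  = 16 + 4;              /* ctrl/MLX seg + inline seg header,
	                                     so header data begins at offset 20 */
	int space  = 64 - start;          /* 44 bytes left before the boundary */
	/* 72 > 44: the header cannot fit in one inline segment, so the
	 * patch splits it in two. */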
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c | 42
1 file changed, 39 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 355a31f9c03c..28a08bdd1800 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -192,6 +192,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	case IB_QPT_GSI:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			ALIGN(MLX4_IB_UD_HEADER_SIZE +
+			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
+					   MLX4_INLINE_ALIGN) *
 			      sizeof (struct mlx4_wqe_inline_seg),
 			      sizeof (struct mlx4_wqe_data_seg)) +
 			ALIGN(4 +
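Worked through with the same assumed constants as above (72-byte UD
header, 64-byte chunks, 4-byte inline segment header, 16-byte struct
mlx4_wqe_data_seg), the change to the reserved overhead looks like
this; a sketch of the arithmetic only, not driver code:

	/* DIV_ROUND_UP(72, 64) = 2: worst case, one inline segment
	 * header per 64-byte chunk the UD header may span. */
	int old_overhead = ALIGN(72 + 4, 16);		/* = 80 bytes */
	int new_overhead = ALIGN(72 + 2 * 4, 16);	/* = 80 bytes */
	/* The totals happen to coincide for a 72-byte header; the point
	 * of the DIV_ROUND_UP() form is that the reservation stays
	 * sufficient if MLX4_IB_UD_HEADER_SIZE ever grows. */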
@@ -1049,6 +1051,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	u16 pkey;
 	int send_size;
 	int header_size;
+	int spc;
 	int i;
 
 	send_size = 0;
@@ -1124,10 +1127,43 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		printk("\n");
 	}
 
-	inl->byte_count = cpu_to_be32(1 << 31 | header_size);
-	memcpy(inl + 1, sqp->header_buf, header_size);
+	/*
+	 * Inline data segments may not cross a 64 byte boundary.  If
+	 * our UD header is bigger than the space available up to the
+	 * next 64 byte boundary in the WQE, use two inline data
+	 * segments to hold the UD header.
+	 */
+	spc = MLX4_INLINE_ALIGN -
+		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
+	if (header_size <= spc) {
+		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
+		memcpy(inl + 1, sqp->header_buf, header_size);
+		i = 1;
+	} else {
+		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+		memcpy(inl + 1, sqp->header_buf, spc);
+
+		inl = (void *) (inl + 1) + spc;
+		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
+		/*
+		 * Need a barrier here to make sure all the data is
+		 * visible before the byte_count field is set.
+		 * Otherwise the HCA prefetcher could grab the 64-byte
+		 * chunk with this inline segment and get a valid (!=
+		 * 0xffffffff) byte count but stale data, and end up
+		 * generating a packet with bad headers.
+		 *
+		 * The first inline segment's byte_count field doesn't
+		 * need a barrier, because it comes after a
+		 * control/MLX segment and therefore is at an offset
+		 * of 16 mod 64.
+		 */
+		wmb();
+		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
+		i = 2;
+	}
 
-	return ALIGN(sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
 }
 
 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
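For readers who want to experiment with the split logic outside the
driver, here is a minimal host-side simulation of the same arithmetic.
It is a sketch, not mlx4 code: it assumes MLX4_INLINE_ALIGN is 64, a
72-byte UD header, that an inline segment header is a single 32-bit
byte_count whose bit 31 flags the segment as inline (the 1 << 31
above), and that the first inline segment sits 16 bytes into the WQE
as the barrier comment states.  It also skips the cpu_to_be32()
byte-swapping and the wmb() ordering concern, which only matter
against real hardware.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INLINE_ALIGN	64	/* assumed MLX4_INLINE_ALIGN */
#define INL_SEG_SIZE	4	/* inline segment header: one 32-bit byte_count */
#define MLX_SEG_OFF	16	/* first inline segment offset within the WQE */
#define UD_HEADER_SIZE	72	/* assumed MLX4_IB_UD_HEADER_SIZE of this era */

int main(void)
{
	uint8_t wqe[256] = { 0 };
	uint8_t header_buf[UD_HEADER_SIZE];
	size_t off = MLX_SEG_OFF;
	size_t spc;
	uint32_t bc;
	int segs;

	memset(header_buf, 0xab, sizeof header_buf);

	/* Space between the segment's payload and the next 64-byte
	 * boundary -- mirrors the spc computation in the patch. */
	spc = INLINE_ALIGN - ((off + INL_SEG_SIZE) & (INLINE_ALIGN - 1));

	if (UD_HEADER_SIZE <= spc) {
		bc = (1u << 31) | UD_HEADER_SIZE;	/* inline flag | length */
		memcpy(wqe + off, &bc, sizeof bc);
		memcpy(wqe + off + INL_SEG_SIZE, header_buf, UD_HEADER_SIZE);
		segs = 1;
	} else {
		/* First segment fills exactly up to the boundary... */
		bc = (1u << 31) | (uint32_t) spc;
		memcpy(wqe + off, &bc, sizeof bc);
		memcpy(wqe + off + INL_SEG_SIZE, header_buf, spc);

		/* ...so the second segment starts on the boundary, and
		 * its payload cannot cross the next one as long as it
		 * holds no more than 60 bytes. */
		off += INL_SEG_SIZE + spc;
		assert(off % INLINE_ALIGN == 0);
		bc = (1u << 31) | (uint32_t) (UD_HEADER_SIZE - spc);
		memcpy(wqe + off, &bc, sizeof bc);
		memcpy(wqe + off + INL_SEG_SIZE, header_buf + spc,
		       UD_HEADER_SIZE - spc);
		assert(INL_SEG_SIZE + UD_HEADER_SIZE - spc <= INLINE_ALIGN);
		segs = 2;
	}

	/* With these numbers: spc = 44, so the 72-byte header splits
	 * into 44 + 28 bytes across two inline segments. */
	printf("spc = %zu, segments = %d\n", spc, segs);
	return 0;
}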