Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/Kconfig                 |   1
-rw-r--r--  drivers/infiniband/core/ucm.c              |   4
-rw-r--r--  drivers/infiniband/core/ucma.c             |   4
-rw-r--r--  drivers/infiniband/core/user_mad.c         |   4
-rw-r--r--  drivers/infiniband/core/uverbs_main.c      |   8
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c      |  18
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c        |   1
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c            |   4
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c           |   1
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c          |   1
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c            |   2
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c            |   2
-rw-r--r--  drivers/infiniband/hw/qedr/main.c          |  61
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 125
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c   |  14
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c         |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c        |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c         |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c         |   2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c          |   1
20 files changed, 108 insertions(+), 151 deletions(-)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 857beed1459c..8517d6ea91a6 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
 	depends on NET
 	depends on INET
 	depends on m || IPV6 != m
+	depends on !ALPHA
 	select IRQ_POLL
 	---help---
 	  Core support for InfiniBand (IB).  Make sure to also select
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f3eb1c3b617d..8ae636bb09e5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1135,11 +1135,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	return result;
 }
 
-static unsigned int ib_ucm_poll(struct file *filp,
+static __poll_t ib_ucm_poll(struct file *filp,
 				struct poll_table_struct *wait)
 {
 	struct ib_ucm_file *file = filp->private_data;
-	unsigned int mask = 0;
+	__poll_t mask = 0;
 
 	poll_wait(filp, &file->poll_wait, wait);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index d67219db99be..6ba4231f2b07 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1631,10 +1631,10 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	return ret;
 }
 
-static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
 {
 	struct ucma_file *file = filp->private_data;
-	unsigned int mask = 0;
+	__poll_t mask = 0;
 
 	poll_wait(filp, &file->poll_wait, wait);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 5e8ffd12c04e..78c77962422e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -628,12 +628,12 @@ err:
 	return ret;
 }
 
-static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
 {
 	struct ib_umad_file *file = filp->private_data;
 
 	/* we will always be able to post a MAD send */
-	unsigned int mask = POLLOUT | POLLWRNORM;
+	__poll_t mask = POLLOUT | POLLWRNORM;
 
 	poll_wait(filp, &file->recv_wait, wait);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 899102ad6bb6..5b811bf574d6 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -341,11 +341,11 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
 				    sizeof(struct ib_uverbs_comp_event_desc));
 }
 
-static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
 					 struct file *filp,
 					 struct poll_table_struct *wait)
 {
-	unsigned int pollflags = 0;
+	__poll_t pollflags = 0;
 
 	poll_wait(filp, &ev_queue->poll_wait, wait);
 
@@ -357,13 +357,13 @@ static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
 	return pollflags;
 }
 
-static unsigned int ib_uverbs_async_event_poll(struct file *filp,
+static __poll_t ib_uverbs_async_event_poll(struct file *filp,
 					       struct poll_table_struct *wait)
 {
 	return ib_uverbs_event_poll(filp->private_data, filp, wait);
 }
 
-static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
+static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
 					      struct poll_table_struct *wait)
 {
 	struct ib_uverbs_completion_event_file *comp_ev_file =
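All of the core conversions above share one shape, distilled in the sketch below. The names example_poll and struct example_file are hypothetical, not from this patch; the point is that both the handler's return type and its mask variable become __poll_t, a sparse-checkable __bitwise type, so mixing poll bits with ordinary integers is caught at static-analysis time instead of compiling silently.

/*
 * Minimal sketch of the post-conversion poll handler (hypothetical
 * names). Behavior is unchanged; only the types are annotated.
 */
static __poll_t example_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct example_file *file = filp->private_data;	/* hypothetical */
	__poll_t mask = 0;

	/* Register on the wait queue; this never blocks. */
	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}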
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index acf0ba56c309..d9a0f2590294 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -74,7 +74,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp);
 static int hfi1_file_close(struct inode *inode, struct file *fp);
 static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
 
 static u64 kvirt_to_phys(void *addr);
@@ -102,8 +102,8 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
 			 struct hfi1_user_info *uinfo,
 			 struct hfi1_ctxtdata **cd);
 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
-static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
-static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
 			  unsigned long arg);
 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
@@ -605,10 +605,10 @@ static int vma_fault(struct vm_fault *vmf)
 	return 0;
 }
 
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
 {
 	struct hfi1_ctxtdata *uctxt;
-	unsigned pollflag;
+	__poll_t pollflag;
 
 	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
 	if (!uctxt)
@@ -1423,13 +1423,13 @@ static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
 	return ret;
 }
 
-static unsigned int poll_urgent(struct file *fp,
+static __poll_t poll_urgent(struct file *fp,
 				struct poll_table_struct *pt)
 {
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct hfi1_devdata *dd = uctxt->dd;
-	unsigned pollflag;
+	__poll_t pollflag;
 
 	poll_wait(fp, &uctxt->wait, pt);
 
@@ -1446,13 +1446,13 @@ static unsigned int poll_urgent(struct file *fp,
 	return pollflag;
 }
 
-static unsigned int poll_next(struct file *fp,
+static __poll_t poll_next(struct file *fp,
 			      struct poll_table_struct *pt)
 {
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct hfi1_devdata *dd = uctxt->dd;
-	unsigned pollflag;
+	__poll_t pollflag;
 
 	poll_wait(fp, &uctxt->wait, pt);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index e7b3ce123da6..70aceefe14d5 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -77,6 +77,7 @@ static void do_remove(struct mmu_rb_handler *handler,
 static void handle_remove(struct work_struct *work);
 
 static const struct mmu_notifier_ops mn_opts = {
+	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.invalidate_range_start = mmu_notifier_range_start,
 };
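The new .flags line asserts that this notifier's invalidation callbacks never sleep, which lets the core mm invoke them in contexts where blocking is not allowed. A minimal sketch of the idiom, assuming the mmu_notifier API of this kernel generation; my_range_start and my_mn_ops are hypothetical:

/*
 * Hedged sketch: a notifier whose invalidation path takes only
 * spinlocks may advertise MMU_INVALIDATE_DOES_NOT_BLOCK so the core
 * mm knows the callback cannot sleep.
 */
static void my_range_start(struct mmu_notifier *mn, struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	/* spinlock-only work here; nothing that can sleep */
}

static const struct mmu_notifier_ops my_mn_ops = {
	.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range_start	= my_range_start,
};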
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 93ea03c2129c..da58046a02ea 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -300,7 +300,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 			goto bail;
 		/* We are in the error state, flush the work request. */
-		smp_read_barrier_depends(); /* see post_one_send() */
 		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
@@ -344,7 +343,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 			newreq = 0;
 			if (qp->s_cur == qp->s_tail) {
 				/* Check if send work queue is empty. */
-				smp_read_barrier_depends(); /* see post_one_send() */
 				if (qp->s_tail == READ_ONCE(qp->s_head)) {
 					clear_ahg(qp);
 					goto bail;
@@ -899,7 +897,6 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
 	}
 
 	/* Ensure s_rdma_ack_cnt changes are committed */
-	smp_read_barrier_depends();
 	if (qp->s_rdma_ack_cnt) {
 		hfi1_queue_rc_ack(packet, is_fecn);
 		return;
@@ -1561,7 +1558,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
 	trace_hfi1_ack(qp, psn);
 
 	/* Ignore invalid responses. */
-	smp_read_barrier_depends(); /* see post_one_send */
 	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
 		goto ack_done;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index edef320d745d..3daa94bdae3a 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -351,7 +351,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
 	sqp->s_flags |= RVT_S_BUSY;
 
 again:
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (sqp->s_last == READ_ONCE(sqp->s_head))
 		goto clr_busy;
 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index d8ddbfdf3a4d..1f203309cf24 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
 
 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
 {
-	smp_read_barrier_depends(); /* see sdma_update_tail() */
 	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
 }
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 8030c38a27f2..9d7a3110c14c 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 			goto bail;
 		/* We are in the error state, flush the work request. */
-		smp_read_barrier_depends(); /* see post_one_send() */
 		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 				       RVT_PROCESS_NEXT_SEND_OK))
 			goto bail;
 		/* Check if send work queue is empty. */
-		smp_read_barrier_depends(); /* see post_one_send() */
 		if (qp->s_cur == READ_ONCE(qp->s_head)) {
 			clear_ahg(qp);
 			goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 066afbcfc064..bcf3b0bebac8 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -489,7 +489,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 			goto bail;
 		/* We are in the error state, flush the work request. */
-		smp_read_barrier_depends(); /* see post_one_send */
 		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
@@ -503,7 +502,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	}
 
 	/* see post_one_send() */
-	smp_read_barrier_depends();
 	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
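Every smp_read_barrier_depends() deleted above guarded a READ_ONCE() of a producer-updated queue index. On every architecture except Alpha the call was already a no-op, and the Kconfig hunk above removes Alpha from the subsystem entirely, so the explicit barrier adds nothing. A sketch of the recurring check; send_queue_empty() is a hypothetical helper name, while s_last and s_head are the rvt_qp fields used in the hunks:

static bool send_queue_empty(struct rvt_qp *qp)
{
	/*
	 * Previously:
	 *	smp_read_barrier_depends();	// see post_one_send()
	 *	return qp->s_last == READ_ONCE(qp->s_head);
	 * READ_ONCE() now provides the needed ordering by itself.
	 */
	return qp->s_last == READ_ONCE(qp->s_head);
}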
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b33291b..db4bf97c0e15 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
 			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
@@ -430,59 +430,16 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)
 static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
 {
-	struct pci_dev *bridge;
-	u32 ctl2, cap2;
-	u16 flags;
-	int rc;
-
-	bridge = pdev->bus->self;
-	if (!bridge)
-		goto disable;
-
-	/* Check atomic routing support all the way to root complex */
-	while (bridge->bus->parent) {
-		rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
-		if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
-			goto disable;
-
-		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
-		if (rc)
-			goto disable;
+	int rc = pci_enable_atomic_ops_to_root(pdev,
+					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 
-		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
-		if (rc)
-			goto disable;
-
-		if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
-		    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
-			goto disable;
-		bridge = bridge->bus->parent->self;
+	if (rc) {
+		dev->atomic_cap = IB_ATOMIC_NONE;
+		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
+	} else {
+		dev->atomic_cap = IB_ATOMIC_GLOB;
+		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
 	}
-
-	rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
-	if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
-		goto disable;
-
-	rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
-	if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
-		goto disable;
-
-	/* Set atomic operations */
-	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
-				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
-	dev->atomic_cap = IB_ATOMIC_GLOB;
-
-	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
-
-	return;
-
-disable:
-	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
-				   PCI_EXP_DEVCTL2_ATOMIC_REQ);
-	dev->atomic_cap = IB_ATOMIC_NONE;
-
-	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
-
 }
 
 static const struct qed_rdma_ops *qed_ops;
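The rewritten qedr_pci_set_atomic() delegates the whole hand-rolled walk to the root complex to the PCI core. A hedged sketch of the helper in isolation; probe_atomic64() is a hypothetical wrapper, and the capability masks are the standard PCI_EXP_DEVCAP2_ATOMIC_COMP32/COMP64/COMP128 bits, which may be OR'ed together to demand several completer widths at once:

/*
 * pci_enable_atomic_ops_to_root() verifies AtomicOp routing from the
 * endpoint up to the root complex and, on success, sets ATOMIC_REQ on
 * the device. Returns 0 on success, negative errno otherwise.
 */
static bool probe_atomic64(struct pci_dev *pdev)
{
	return pci_enable_atomic_ops_to_root(pdev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64) == 0;
}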
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b7587f10e7de..78b49002fbd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
 	__le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+	__le16 icid;	/* internal CID */
+	u8 agg_flags;	/* aggregative flags */
+	u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
 	__le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
 	__le16 icid;
 	u8 agg_flags;
 	u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK		0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT		0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK		0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT		2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK		0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT		3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK		0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT		0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK		0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT		2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK	0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT	3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK		0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT		4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK		0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT		5
 	__le32 value;
 };
 
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT	0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK		0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT		1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK		0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT	2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK	0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT	3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT	4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT	5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK		0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT		6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK			0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT			7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT	0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT		1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT	2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT	3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT	4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT	5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT		6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT	7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK			0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT			8
+	__le32 reserved5;
 };
 
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT	0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK		0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT	1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT	2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK	0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT	3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT	4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK	0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT	5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK		0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT		6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK		0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT		7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT	0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT	1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT	2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK	0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT	3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT	4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT	5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT		6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK	0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT	7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK		0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT		8
+	__le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
 	__le32 xrc_srq;
 	u8 req_type;
 	u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK			0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT			0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK		0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT		1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK		0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT		2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK			0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT			3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK		0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT		4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK		0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT		5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK			0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT		6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK			0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT			0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT		1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT		2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK			0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT			3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT		4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT		5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT		6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK			0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT		7
 	u8 wqe_size;
 	u8 prev_wqe_size;
 	struct regpair remote_va;
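These *_MASK/*_SHIFT pairs are consumed through the field helpers in the qed common HSI headers, which paste the suffixes onto a field name. A hedged sketch of setting the new read-with-invalidate bit on a WQE flags byte; example_set_read_inv() is hypothetical, SET_FIELD() is assumed from linux/qed/common_hsi.h, and struct rdma_sq_rdma_wqe is the structure shown above:

/* Hypothetical: build the flags byte of an SQ RDMA WQE. */
static void example_set_read_inv(struct rdma_sq_rdma_wqe *wqe)
{
	u8 flags = 0;

	SET_FIELD(flags, RDMA_SQ_RDMA_WQE_READ_INV_FLG, 1);	/* new bit 6 */
	SET_FIELD(flags, RDMA_SQ_RDMA_WQE_COMP_FLG, 1);		/* request a CQE */
	wqe->flags = flags;
}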
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index a9a48b393323..f7593b5e2b76 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -58,7 +58,7 @@ static int qib_open(struct inode *, struct file *);
 static int qib_close(struct inode *, struct file *);
 static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
 static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static __poll_t qib_poll(struct file *, struct poll_table_struct *);
 static int qib_mmapf(struct file *, struct vm_area_struct *);
 
 /*
@@ -1074,12 +1074,12 @@ bail:
 	return ret;
 }
 
-static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
 				    struct file *fp,
 				    struct poll_table_struct *pt)
 {
 	struct qib_devdata *dd = rcd->dd;
-	unsigned pollflag;
+	__poll_t pollflag;
 
 	poll_wait(fp, &rcd->wait, pt);
 
@@ -1096,12 +1096,12 @@ static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
 	return pollflag;
 }
 
-static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
 				  struct file *fp,
 				  struct poll_table_struct *pt)
 {
 	struct qib_devdata *dd = rcd->dd;
-	unsigned pollflag;
+	__poll_t pollflag;
 
 	poll_wait(fp, &rcd->wait, pt);
 
@@ -1117,10 +1117,10 @@ static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
 	return pollflag;
 }
 
-static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
 {
 	struct qib_ctxtdata *rcd;
-	unsigned pollflag;
+	__poll_t pollflag;
 
 	rcd = ctxt_fp(fp);
 	if (!rcd)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 61d3bc6771f0..c9955d48c50f 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 			goto bail;
 		/* We are in the error state, flush the work request. */
-		smp_read_barrier_depends(); /* see post_one_send() */
 		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 			newreq = 0;
 			if (qp->s_cur == qp->s_tail) {
 				/* Check if send work queue is empty. */
-				smp_read_barrier_depends(); /* see post_one_send() */
 				if (qp->s_tail == READ_ONCE(qp->s_head))
 					goto bail;
 				/*
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 		goto ack_done;
 
 	/* Ignore invalid responses. */
-	smp_read_barrier_depends(); /* see post_one_send */
 	if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
 		goto ack_done;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8..4662cc7bde92 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	sqp->s_flags |= RVT_S_BUSY;
 
 again:
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (sqp->s_last == READ_ONCE(sqp->s_head))
 		goto clr_busy;
 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index d311db6487cf..840eec6ebc33 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 			goto bail;
 		/* We are in the error state, flush the work request. */
-		smp_read_barrier_depends(); /* see post_one_send() */
 		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 				       RVT_PROCESS_NEXT_SEND_OK))
 			goto bail;
 		/* Check if send work queue is empty. */
-		smp_read_barrier_depends(); /* see post_one_send() */
 		if (qp->s_cur == READ_ONCE(qp->s_head))
 			goto bail;
 		/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 59a4f0333803..3e4ff77260c2 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 			goto bail;
 		/* We are in the error state, flush the work request. */
-		smp_read_barrier_depends(); /* see post_one_send */
 		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 	}
 
 	/* see post_one_send() */
-	smp_read_barrier_depends();
 	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 4c548bf1f032..c82e6bb3d77c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
 	/* non-reserved operations */
 	if (likely(qp->s_avail))
 		return 0;
-	smp_read_barrier_depends(); /* see rc.c */
 	slast = READ_ONCE(qp->s_last);
 	if (qp->s_head >= slast)
 		avail = qp->s_size - (qp->s_head - slast);
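The surviving arithmetic in rvt_qp_is_avail() is standard circular-queue occupancy math: with a producer index s_head and consumer index s_last on a queue of s_size slots, the free count depends on whether the producer has wrapped. Restated as a standalone, hypothetical helper:

/*
 * Hedged sketch of the same logic; ring_avail() is not a kernel API.
 * Matches the hunk above: head >= last means the in-flight entries
 * sit in one contiguous run, otherwise head has wrapped past the end.
 */
static u32 ring_avail(u32 head, u32 last, u32 size)
{
	if (head >= last)
		return size - (head - last);	/* free = size - in flight */
	return last - head;			/* producer has wrapped */
}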