Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mlx5/driver.h | 49 ----------------------------
-rw-r--r--  include/linux/mlx5/eq.h     | 21 +++++++++++++
2 files changed, 21 insertions(+), 49 deletions(-)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index fe9b552aa649..f41e6713df10 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -510,7 +510,6 @@ struct mlx5_fc_stats {
 struct mlx5_mpfs;
 struct mlx5_eswitch;
 struct mlx5_lag;
-struct mlx5_pagefault;
 struct mlx5_eq_table;
 
 struct mlx5_rate_limit {
@@ -619,13 +618,6 @@ struct mlx5_priv {
 
 	struct mlx5_port_module_event_stats  pme_stats;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	void		      (*pfault)(struct mlx5_core_dev *dev,
-					void *context,
-					struct mlx5_pagefault *pfault);
-	void		       *pfault_ctx;
-	struct srcu_struct	pfault_srcu;
-#endif
 	struct mlx5_bfreg_data		bfregs;
 	struct mlx5_uars_page	       *uar;
 };
@@ -650,44 +642,6 @@ enum mlx5_pagefault_type_flags {
 	MLX5_PFAULT_RDMA      = 1 << 2,
 };
 
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
-	u32			bytes_committed;
-	u32			token;
-	u8			event_subtype;
-	u8			type;
-	union {
-		/* Initiator or send message responder pagefault details. */
-		struct {
-			/* Received packet size, only valid for responders. */
-			u32	packet_size;
-			/*
-			 * Number of resource holding WQE, depends on type.
-			 */
-			u32	wq_num;
-			/*
-			 * WQE index. Refers to either the send queue or
-			 * receive queue, according to event_subtype.
-			 */
-			u16	wqe_index;
-		} wqe;
-		/* RDMA responder pagefault details */
-		struct {
-			u32	r_key;
-			/*
-			 * Received packet size, minimal size page fault
-			 * resolution required for forward progress.
-			 */
-			u32	packet_size;
-			u32	rdma_op_len;
-			u64	rdma_va;
-		} rdma;
-	};
-
-	struct mlx5_eq_pagefault *eq;
-	struct work_struct	work;
-};
-
 struct mlx5_td {
 	struct list_head tirs_list;
 	u32              tdn;
@@ -1118,9 +1072,6 @@ struct mlx5_interface {
 	void			(*detach)(struct mlx5_core_dev *dev, void *context);
 	void			(*event)(struct mlx5_core_dev *dev, void *context,
 					 enum mlx5_dev_event event, unsigned long param);
-	void			(*pfault)(struct mlx5_core_dev *dev,
-					  void *context,
-					  struct mlx5_pagefault *pfault);
 	void *			(*get_dev)(void *context);
 	int			protocol;
 	struct list_head	list;
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index c733673ba5f6..71d82c5a1a02 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -17,6 +17,10 @@ enum {
 	MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
 };
 
+#define MLX5_NUM_CMD_EQE   (32)
+#define MLX5_NUM_ASYNC_EQE (0x1000)
+#define MLX5_NUM_SPARE_EQE (0x80)
+
 struct mlx5_eq;
 
 struct mlx5_eq_param {
@@ -36,4 +40,21 @@ mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
 void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
 
+/* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events.  We
+ * create EQs with MLX5_NUM_SPARE_EQE extra entries,
+ * so we must update our consumer index at
+ * least that often.
+ *
+ * mlx5_eq_update_cc must be called on every EQE @EQ irq handler
+ */
+static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
+{
+	if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
+		mlx5_eq_update_ci(eq, cc, 0);
+		cc = 0;
+	}
+	return cc;
+}
+
 #endif /* MLX5_CORE_EQ_H */
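
For context, the mlx5_eq_update_cc() helper added to eq.h is intended to be called once per EQE from an EQ interrupt handler, folding the local consumer counter back into the hardware consumer index before MLX5_NUM_SPARE_EQE events accumulate. The following is a minimal sketch of such a consumer loop, assuming the generic EQ API exported by this patch; the function names my_eq_poll() and handle_eqe() are hypothetical and not part of this change.

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

/* Hypothetical per-EQE handler; not part of this patch. */
static void handle_eqe(struct mlx5_eqe *eqe)
{
        /* consume the event */
}

/* Sketch of an EQ irq handler loop built on the generic EQ helpers. */
static void my_eq_poll(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        u32 cc = 0;

        /* mlx5_eq_get_eqe() returns the next valid EQE at the current
         * consumer counter, or NULL when the queue is empty.
         */
        while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
                handle_eqe(eqe);
                ++cc;
                /* Update the hardware CI before MLX5_NUM_SPARE_EQE events
                 * pile up, so the HCA never sees the queue as overflowed.
                 */
                cc = mlx5_eq_update_cc(eq, cc);
        }

        /* Publish the final consumer index and re-arm the EQ. */
        mlx5_eq_update_ci(eq, cc, true);
}
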