author     Jason Gunthorpe <jgg@nvidia.com>    2020-07-27 16:44:36 +0200
committer  Jason Gunthorpe <jgg@nvidia.com>    2020-07-27 16:44:36 +0200
commit     79237743687178a56b282e6a8b9845ab0a54e598 (patch)
tree       d85a63da78278f7d986545338eb0bdf6b7104790 /drivers/infiniband
parent     RDMA/mlx5: Fix typo in enum name (diff)
parent     RDMA/mlx5: Set mkey relaxed ordering by UMR with ConnectX-7 (diff)
Merge branch 'mlx5_uar' into rdma.git /for-next
Meir Lichtinger says:
====================
ConnectX-7 supports setting the relaxed ordering read/write mkey attributes
by UMR, as indicated by new HCA capabilities, so extend the mlx5_ib driver
to configure the UMR control segment accordingly.
====================
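The gist of the series is a gating rule: when a caller asks for relaxed
ordering, UMR may only be used if the device can also modify that mkey
attribute through UMR. A minimal standalone sketch of the check follows
(the cap fields stand in for the MLX5_CAP_GEN() queries made in
mlx5_ib_can_use_umr(); this is an illustration, not the kernel code
itself):

#include <stdbool.h>

/* Stand-ins for the HCA capability bits queried via MLX5_CAP_GEN(). */
struct ro_caps {
	bool relaxed_ordering_write;     /* device supports RO write mkeys */
	bool relaxed_ordering_read;      /* device supports RO read mkeys */
	bool relaxed_ordering_write_umr; /* ...and can set it by UMR (ConnectX-7) */
	bool relaxed_ordering_read_umr;
};

/* Mirror of the new test in mlx5_ib_can_use_umr(): refuse UMR when the
 * device would enable relaxed ordering on a regular mkey but cannot
 * express that attribute in a UMR work request.
 */
static bool can_use_umr(const struct ro_caps *c, bool want_relaxed_ordering)
{
	if (want_relaxed_ordering && c->relaxed_ordering_write &&
	    !c->relaxed_ordering_write_umr)
		return false;

	if (want_relaxed_ordering && c->relaxed_ordering_read &&
	    !c->relaxed_ordering_read_umr)
		return false;

	return true;
}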
Based on the mlx5-next branch at
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
due to dependencies.
* branch 'mlx5_uar':
RDMA/mlx5: Set mkey relaxed ordering by UMR with ConnectX-7
RDMA/mlx5: Use MLX5_SET macro instead of local structure
RDMA/mlx5: ConnectX-7 new capabilities to set relaxed ordering by UMR
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h  18
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c       68
2 files changed, 57 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b93f3aa9e218..5287fc868662 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1356,15 +1356,6 @@ static inline void init_query_mad(struct ib_smp *mad)
 	mad->method = IB_MGMT_METHOD_GET;
 }
 
-static inline u8 convert_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
-	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
-	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
-	       MLX5_PERM_LOCAL_READ;
-}
-
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
 	return qp_type == MLX5_IB_QPT_HW_GSI;
@@ -1463,8 +1454,13 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
 		return false;
 
 	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
-	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
-	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return false;
+
+	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
 		return false;
 
 	return true;
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 2c6df1c43b55..4d4f8c22b3e6 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -263,7 +263,9 @@ static __be64 get_umr_update_translation_mask(void)
 	return cpu_to_be64(result);
 }
 
-static __be64 get_umr_update_access_mask(int atomic)
+static __be64 get_umr_update_access_mask(int atomic,
+					 int relaxed_ordering_write,
+					 int relaxed_ordering_read)
 {
 	u64 result;
 
@@ -275,6 +277,12 @@ static __be64 get_umr_update_access_mask(int atomic)
 	if (atomic)
 		result |= MLX5_MKEY_MASK_A;
 
+	if (relaxed_ordering_write)
+		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
+
+	if (relaxed_ordering_read)
+		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
+
 	return cpu_to_be64(result);
 }
 
@@ -289,17 +297,28 @@ static __be64 get_umr_update_pd_mask(void)
 
 static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
 {
-	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
-	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
-	    (mask & MLX5_MKEY_MASK_A &&
-	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
 		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_A &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+		return -EPERM;
+
 	return 0;
 }
 
 static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 			       struct mlx5_wqe_umr_ctrl_seg *umr,
-			       const struct ib_send_wr *wr, int atomic)
+			       const struct ib_send_wr *wr)
 {
 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
@@ -325,7 +344,10 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
 		umr->mkey_mask |= get_umr_update_translation_mask();
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
-		umr->mkey_mask |= get_umr_update_access_mask(atomic);
+		umr->mkey_mask |= get_umr_update_access_mask(
+			!!(MLX5_CAP_GEN(dev->mdev, atomic)),
+			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)),
+			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)));
 		umr->mkey_mask |= get_umr_update_pd_mask();
 	}
 	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
@@ -383,20 +405,31 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
 
 	memset(seg, 0, sizeof(*seg));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
-		seg->status = MLX5_MKEY_STATUS_FREE;
+		MLX5_SET(mkc, seg, free, 1);
+
+	MLX5_SET(mkc, seg, a,
+		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, seg, rw,
+		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, seg, lr, 1);
+	MLX5_SET(mkc, seg, relaxed_ordering_write,
+		 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
+	MLX5_SET(mkc, seg, relaxed_ordering_read,
+		 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
 
-	seg->flags = convert_access(umrwr->access_flags);
 	if (umrwr->pd)
-		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+		MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn);
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
 	    !umrwr->length)
-		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+		MLX5_SET(mkc, seg, length64, 1);
 
-	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
-	seg->len = cpu_to_be64(umrwr->length);
-	seg->log2_page_size = umrwr->page_shift;
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
-				       mlx5_mkey_variant(umrwr->mkey));
+	MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr);
+	MLX5_SET64(mkc, seg, len, umrwr->length);
+	MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift);
+	MLX5_SET(mkc, seg, qpn, 0xffffff);
+	MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey));
 }
 
 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
@@ -1224,8 +1257,7 @@ static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 
 	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
 	(*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
-	err = set_reg_umr_segment(dev, *seg, wr,
-				  !!(MLX5_CAP_GEN(dev->mdev, atomic)));
+	err = set_reg_umr_segment(dev, *seg, wr);
 	if (unlikely(err))
 		goto out;
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
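The MLX5_SET()/MLX5_SET64() calls that replace the open-coded mkey segment
writes above set a named field inside a firmware-layout buffer, with the
field's bit offset and width generated from the device spec headers
(mlx5_ifc.h) and the byte order handled for the caller. A rough standalone
illustration of the idea (a toy helper, not the real macro; the offset and
width are passed by hand here, and the field is assumed not to straddle a
32-bit word, as in the mlx5 layouts):

#include <stdint.h>
#include <arpa/inet.h> /* htonl/ntohl for the big-endian dwords */

/* Toy stand-in for MLX5_SET(): write a 'width'-bit value at absolute bit
 * offset 'bit_off' (counted MSB-first) within a buffer of big-endian
 * 32-bit words, read-modify-write style.
 */
static void set_field(void *buf, unsigned int bit_off, unsigned int width,
		      uint32_t val)
{
	uint32_t *dw = (uint32_t *)buf + bit_off / 32;
	unsigned int shift = 32 - bit_off % 32 - width;
	uint32_t mask = (width == 32 ? ~0u : ((1u << width) - 1)) << shift;
	uint32_t host = ntohl(*dw); /* to host order, edit, write back */

	host = (host & ~mask) | ((val << shift) & mask);
	*dw = htonl(host);
}

A call such as MLX5_SET(mkc, seg, free, 1) resolves to this kind of masked
read-modify-write, with the offset and width of the field taken from the
generated mkc layout rather than supplied by hand. Writing the segment
through the spec-derived layout is what lets the new
relaxed_ordering_write/relaxed_ordering_read bits be set without another
hand-rolled convert_access()-style helper.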