author		Or Har-Toov <ohartoov@nvidia.com>	2024-04-03 12:36:00 +0200
committer	Leon Romanovsky <leon@kernel.org>	2024-04-08 12:34:51 +0200
commit		8c1185fef68cc603b954fece2a434c9f851d6a86 (patch)
tree		0c0689156190ab0f8e0d20e5154c1d27edc9a4e3 /drivers/infiniband/hw/mlx5
parent		RDMA/mlx5: Uncacheable mkey has neither rb_key or cache_ent (diff)
RDMA/mlx5: Change check for cacheable mkeys
umem can be NULL for user application mkeys in some cases. Therefore
umem can't be used to check whether an mkey is cacheable; check a flag
that indicates it instead. Also make sure that all mkeys which are not
returned to the cache are destroyed.
Fixes: dd1b913fb0d0 ("RDMA/mlx5: Cache all user cacheable mkeys on dereg MR flow")
Signed-off-by: Or Har-Toov <ohartoov@nvidia.com>
Link: https://lore.kernel.org/r/2690bc5c6896bcb937f89af16a1ff0343a7ab3d0.1712140377.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
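
Before reading the diff, the sketch below is a standalone model (not from
the patch) of why the check had to change. The names mkey_model,
revoke_mr_model, revoke_umr and store_in_cache are hypothetical stand-ins
for the mlx5 types and helpers; only the control flow mirrors the new
mlx5_revoke_mr() in the diff.

	/*
	 * Standalone sketch, not kernel code. Models the dereg decision:
	 * a creation-time flag, not the possibly-NULL umem, decides whether
	 * the mkey may go back to the cache. Build: cc -Wall revoke_model.c
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct mkey_model {
		bool cacheable;  /* set at creation, like mmkey.cacheable */
		void *umem;      /* may be NULL for some user mkeys */
	};

	/* Stand-ins for mlx5r_umr_revoke_mr()/cache_ent_find_and_store();
	 * both return 0 on success, as the kernel helpers do. */
	static int revoke_umr(struct mkey_model *mk) { (void)mk; return 0; }
	static int store_in_cache(struct mkey_model *mk) { (void)mk; return 0; }

	static int revoke_mr_model(struct mkey_model *mk)
	{
		/* New check: trust the flag, not the possibly-NULL umem. */
		if (mk->cacheable && !revoke_umr(mk) && !store_in_cache(mk)) {
			printf("mkey returned to cache\n");
			return 0;
		}
		/* Anything not returned to the cache is destroyed. */
		printf("mkey destroyed (cacheable=%d, umem=%p)\n",
		       mk->cacheable, mk->umem);
		return 0;
	}

	int main(void)
	{
		/* The old umem-based test would have refused to cache this. */
		struct mkey_model cacheable_no_umem = { .cacheable = true };
		struct mkey_model plain = { .cacheable = false };

		revoke_mr_model(&cacheable_no_umem);
		revoke_mr_model(&plain);
		return 0;
	}

The design point the new helper captures: every mkey that is not
successfully revoked and stored back in the cache falls through to
destroy_mkey(), instead of being leaked when cache_ent happened to
still be set.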
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 +
 drivers/infiniband/hw/mlx5/mr.c      | 32 ++++++++++++++++++++++----------
 2 files changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e74f04865062..f255a12e26a0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -646,6 +646,7 @@ struct mlx5_ib_mkey {
 	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
 	struct mlx5r_cache_rb_key rb_key;
 	struct mlx5_cache_ent *cache_ent;
+	u8 cacheable : 1;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a8ee2ca1f4a1..7f7b1f59b5f0 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 		if (IS_ERR(mr))
 			return mr;
 		mr->mmkey.rb_key = rb_key;
+		mr->mmkey.cacheable = true;
 		return mr;
 	}
 
@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	mr->ibmr.pd = pd;
 	mr->umem = umem;
 	mr->page_shift = order_base_2(page_size);
+	mr->mmkey.cacheable = true;
 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
 
 	return mr;
@@ -1835,6 +1837,23 @@ end:
 	return ret;
 }
 
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+		return 0;
+
+	if (ent) {
+		spin_lock_irq(&ent->mkeys_queue.lock);
+		ent->in_use--;
+		mr->mmkey.cache_ent = NULL;
+		spin_unlock_irq(&ent->mkeys_queue.lock);
+	}
+	return destroy_mkey(dev, mr);
+}
+
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1899,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	}
 
 	/* Stop DMA */
-	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
-		if (mlx5r_umr_revoke_mr(mr) ||
-		    cache_ent_find_and_store(dev, mr))
-			mr->mmkey.cache_ent = NULL;
-
-	if (!mr->mmkey.cache_ent) {
-		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
-		if (rc)
-			return rc;
-	}
+	rc = mlx5_revoke_mr(mr);
+	if (rc)
+		return rc;
 
 	if (mr->umem) {
 		bool is_odp = is_odp_mr(mr);