[xenial:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly


[xenial:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly

Marcelo Henrique Cerri
From: Guy Levi <[hidden email]>

BugLink: https://bugs.launchpad.net/bugs/1840189

The driver shouldn't allow UMR to be used to register an MR when
umr_modify_atomic_disabled is set. Otherwise it will always fail in the
post-send flow, which sets the UMR WQE to modify atomic access rights.

Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
Signed-off-by: Guy Levi <[hidden email]>
Reviewed-by: Moni Shoua <[hidden email]>
Signed-off-by: Leon Romanovsky <[hidden email]>
Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@...
Signed-off-by: Doug Ledford <[hidden email]>
(cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4bda44e5b602..e0a2262691fe 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
  return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= mr_cache_max_order(dev) &&
- umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
  int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1214,7 +1204,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
  struct mlx5_ib_dev *dev = to_mdev(pd->device);
  struct mlx5_ib_mr *mr = NULL;
- bool populate_mtts = false;
+ bool use_umr;
  struct ib_umem *umem;
  int page_shift;
  int npages;
@@ -1247,29 +1237,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
  if (err < 0)
  return ERR_PTR(err);
 
- if (use_umr(dev, order)) {
+ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+ if (order <= mr_cache_max_order(dev) && use_umr) {
  mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
  page_shift, order, access_flags);
  if (PTR_ERR(mr) == -EAGAIN) {
  mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
  mr = NULL;
  }
- populate_mtts = false;
  } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
  if (access_flags & IB_ACCESS_ON_DEMAND) {
  err = -EINVAL;
  pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
  goto error;
  }
- populate_mtts = true;
+ use_umr = false;
  }
 
  if (!mr) {
- if (!umr_can_modify_entity_size(dev))
- populate_mtts = true;
  mutex_lock(&dev->slow_path_mutex);
  mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, populate_mtts);
+ page_shift, access_flags, !use_umr);
  mutex_unlock(&dev->slow_path_mutex);
  }
 
@@ -1287,7 +1278,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
  update_odp_mr(mr);
 #endif
 
- if (!populate_mtts) {
+ if (use_umr) {
  int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
  if (access_flags & IB_ACCESS_ON_DEMAND)
--
2.20.1
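
For readers who want the new condition in isolation, below is a minimal,
self-contained userspace sketch of the UMR eligibility logic this patch
introduces in mlx5_ib_reg_user_mr(). The struct and helper names
(caps_model, umr_eligible, take_cache_path) are illustrative assumptions,
not driver symbols; in the driver the capability bits are read with
MLX5_CAP_GEN(dev->mdev, ...) and the cache limit comes from
mr_cache_max_order().

#include <stdbool.h>
#include <stdio.h>

struct caps_model {
        bool umr_modify_entity_size_disabled;
        bool umr_modify_atomic_disabled;
        bool atomic;    /* device supports atomic operations */
};

/* Mirrors the new use_umr computation: UMR registration is only eligible
 * when the device can modify entity size via UMR, and either it can also
 * modify atomic access or atomics are not supported at all (so the
 * post-send flow never needs to touch atomic access rights). */
static bool umr_eligible(const struct caps_model *caps)
{
        return !caps->umr_modify_entity_size_disabled &&
               (!caps->umr_modify_atomic_disabled || !caps->atomic);
}

/* The cached-MR fast path additionally requires the region's page order
 * to fit in the MR cache; otherwise reg_create() is used with
 * populate = !use_umr. */
static bool take_cache_path(const struct caps_model *caps, int order,
                            int cache_max_order)
{
        return order <= cache_max_order && umr_eligible(caps);
}

int main(void)
{
        struct caps_model caps = {
                .umr_modify_entity_size_disabled = false,
                .umr_modify_atomic_disabled = true,
                .atomic = true,
        };

        printf("umr_eligible = %d\n", umr_eligible(&caps));
        printf("cache path (order 5, max 8) = %d\n",
               take_cache_path(&caps, 5, 8));
        return 0;
}

In the example in main(), umr_modify_atomic_disabled is set on an
atomic-capable device, so umr_eligible() returns false and registration
would fall back to reg_create() with the translation entries populated
directly (populate = !use_umr), avoiding the UMR post-send failure
described in the commit message.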


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

[disco:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly

Marcelo Henrique Cerri
From: Guy Levi <[hidden email]>

BugLink: https://bugs.launchpad.net/bugs/1840189

The driver shouldn't allow UMR to be used to register an MR when
umr_modify_atomic_disabled is set. Otherwise it will always fail in the
post-send flow, which sets the UMR WQE to modify atomic access rights.

Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
Signed-off-by: Guy Levi <[hidden email]>
Reviewed-by: Moni Shoua <[hidden email]>
Signed-off-by: Leon Romanovsky <[hidden email]>
Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@...
Signed-off-by: Doug Ledford <[hidden email]>
(cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c2484cc9bc2f..c9ba5c9a5531 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
  return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= mr_cache_max_order(dev) &&
- umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
  int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1321,7 +1311,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
  struct mlx5_ib_dev *dev = to_mdev(pd->device);
  struct mlx5_ib_mr *mr = NULL;
- bool populate_mtts = false;
+ bool use_umr;
  struct ib_umem *umem;
  int page_shift;
  int npages;
@@ -1354,29 +1344,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
  if (err < 0)
  return ERR_PTR(err);
 
- if (use_umr(dev, order)) {
+ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+ if (order <= mr_cache_max_order(dev) && use_umr) {
  mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
  page_shift, order, access_flags);
  if (PTR_ERR(mr) == -EAGAIN) {
  mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
  mr = NULL;
  }
- populate_mtts = false;
  } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
  if (access_flags & IB_ACCESS_ON_DEMAND) {
  err = -EINVAL;
  pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
  goto error;
  }
- populate_mtts = true;
+ use_umr = false;
  }
 
  if (!mr) {
- if (!umr_can_modify_entity_size(dev))
- populate_mtts = true;
  mutex_lock(&dev->slow_path_mutex);
  mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, populate_mtts);
+ page_shift, access_flags, !use_umr);
  mutex_unlock(&dev->slow_path_mutex);
  }
 
@@ -1394,7 +1385,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
  update_odp_mr(mr);
 #endif
 
- if (!populate_mtts) {
+ if (use_umr) {
  int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
  if (access_flags & IB_ACCESS_ON_DEMAND)
--
2.20.1


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

ACK: [xenial:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly

Connor Kuehl
In reply to this post by Marcelo Henrique Cerri
On 8/22/19 8:25 AM, Marcelo Henrique Cerri wrote:

> From: Guy Levi <[hidden email]>
>
> BugLink: https://bugs.launchpad.net/bugs/1840189
>
> The driver shouldn't allow UMR to be used to register an MR when
> umr_modify_atomic_disabled is set. Otherwise it will always fail in the
> post-send flow, which sets the UMR WQE to modify atomic access rights.
>
> Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
> Signed-off-by: Guy Levi <[hidden email]>
> Reviewed-by: Moni Shoua <[hidden email]>
> Signed-off-by: Leon Romanovsky <[hidden email]>
> Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@...
> Signed-off-by: Doug Ledford <[hidden email]>
> (cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
> Signed-off-by: Marcelo Henrique Cerri <[hidden email]>

For Xenial & Disco: clean cherry picks, with the rest of the patch set
already committed.

Acked-by: Connor Kuehl <[hidden email]>

> ---
>  drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
>  1 file changed, 9 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
> index 4bda44e5b602..e0a2262691fe 100644
> --- a/drivers/infiniband/hw/mlx5/mr.c
> +++ b/drivers/infiniband/hw/mlx5/mr.c
> @@ -51,22 +51,12 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
>  static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
>  static int mr_cache_max_order(struct mlx5_ib_dev *dev);
>  static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
> -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
> -{
> - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
> -}
>  
>  static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
>  {
>   return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
>  }
>  
> -static bool use_umr(struct mlx5_ib_dev *dev, int order)
> -{
> - return order <= mr_cache_max_order(dev) &&
> - umr_can_modify_entity_size(dev);
> -}
> -
>  static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
>  {
>   int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
> @@ -1214,7 +1204,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>  {
>   struct mlx5_ib_dev *dev = to_mdev(pd->device);
>   struct mlx5_ib_mr *mr = NULL;
> - bool populate_mtts = false;
> + bool use_umr;
>   struct ib_umem *umem;
>   int page_shift;
>   int npages;
> @@ -1247,29 +1237,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>   if (err < 0)
>   return ERR_PTR(err);
>  
> - if (use_umr(dev, order)) {
> + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
> +  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
> +   !MLX5_CAP_GEN(dev->mdev, atomic));
> +
> + if (order <= mr_cache_max_order(dev) && use_umr) {
>   mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
>   page_shift, order, access_flags);
>   if (PTR_ERR(mr) == -EAGAIN) {
>   mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
>   mr = NULL;
>   }
> - populate_mtts = false;
>   } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
>   if (access_flags & IB_ACCESS_ON_DEMAND) {
>   err = -EINVAL;
>   pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
>   goto error;
>   }
> - populate_mtts = true;
> + use_umr = false;
>   }
>  
>   if (!mr) {
> - if (!umr_can_modify_entity_size(dev))
> - populate_mtts = true;
>   mutex_lock(&dev->slow_path_mutex);
>   mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
> - page_shift, access_flags, populate_mtts);
> + page_shift, access_flags, !use_umr);
>   mutex_unlock(&dev->slow_path_mutex);
>   }
>  
> @@ -1287,7 +1278,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>   update_odp_mr(mr);
>  #endif
>  
> - if (!populate_mtts) {
> + if (use_umr) {
>   int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
>  
>   if (access_flags & IB_ACCESS_ON_DEMAND)
>


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

ACK/Cmnt: [xenial:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly

Stefan Bader-2
In reply to this post by Marcelo Henrique Cerri
On 22.08.19 17:25, Marcelo Henrique Cerri wrote:

> From: Guy Levi <[hidden email]>
>
> BugLink: https://bugs.launchpad.net/bugs/1840189
>
> The driver shouldn't allow UMR to be used to register an MR when
> umr_modify_atomic_disabled is set. Otherwise it will always fail in the
> post-send flow, which sets the UMR WQE to modify atomic access rights.
>
> Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
> Signed-off-by: Guy Levi <[hidden email]>
> Reviewed-by: Moni Shoua <[hidden email]>
> Signed-off-by: Leon Romanovsky <[hidden email]>
> Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@...
> Signed-off-by: Doug Ledford <[hidden email]>
> (cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
> Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
> ---
>  drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
>  1 file changed, 9 insertions(+), 18 deletions(-)
>
Submitting a xenial and a disco patch without a cover email is really asking
for trouble (or being ignored). For now anyway:

Acked-by: Stefan Bader <[hidden email]>


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team


APPLIED: [xenial:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly

Khaled Elmously
In reply to this post by Marcelo Henrique Cerri
On 2019-08-22 12:25:56, Marcelo Henrique Cerri wrote:

> From: Guy Levi <[hidden email]>
>
> BugLink: https://bugs.launchpad.net/bugs/1840189
>
> The driver shouldn't allow UMR to be used to register an MR when
> umr_modify_atomic_disabled is set. Otherwise it will always fail in the
> post-send flow, which sets the UMR WQE to modify atomic access rights.
>
> Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
> Signed-off-by: Guy Levi <[hidden email]>
> Reviewed-by: Moni Shoua <[hidden email]>
> Signed-off-by: Leon Romanovsky <[hidden email]>
> Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@...
> Signed-off-by: Doug Ledford <[hidden email]>
> (cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
> Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
> ---
>  drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
>  1 file changed, 9 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
> index 4bda44e5b602..e0a2262691fe 100644
> --- a/drivers/infiniband/hw/mlx5/mr.c
> +++ b/drivers/infiniband/hw/mlx5/mr.c
> @@ -51,22 +51,12 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
>  static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
>  static int mr_cache_max_order(struct mlx5_ib_dev *dev);
>  static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
> -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
> -{
> - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
> -}
>  
>  static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
>  {
>   return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
>  }
>  
> -static bool use_umr(struct mlx5_ib_dev *dev, int order)
> -{
> - return order <= mr_cache_max_order(dev) &&
> - umr_can_modify_entity_size(dev);
> -}
> -
>  static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
>  {
>   int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
> @@ -1214,7 +1204,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>  {
>   struct mlx5_ib_dev *dev = to_mdev(pd->device);
>   struct mlx5_ib_mr *mr = NULL;
> - bool populate_mtts = false;
> + bool use_umr;
>   struct ib_umem *umem;
>   int page_shift;
>   int npages;
> @@ -1247,29 +1237,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>   if (err < 0)
>   return ERR_PTR(err);
>  
> - if (use_umr(dev, order)) {
> + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
> +  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
> +   !MLX5_CAP_GEN(dev->mdev, atomic));
> +
> + if (order <= mr_cache_max_order(dev) && use_umr) {
>   mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
>   page_shift, order, access_flags);
>   if (PTR_ERR(mr) == -EAGAIN) {
>   mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
>   mr = NULL;
>   }
> - populate_mtts = false;
>   } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
>   if (access_flags & IB_ACCESS_ON_DEMAND) {
>   err = -EINVAL;
>   pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
>   goto error;
>   }
> - populate_mtts = true;
> + use_umr = false;
>   }
>  
>   if (!mr) {
> - if (!umr_can_modify_entity_size(dev))
> - populate_mtts = true;
>   mutex_lock(&dev->slow_path_mutex);
>   mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
> - page_shift, access_flags, populate_mtts);
> + page_shift, access_flags, !use_umr);
>   mutex_unlock(&dev->slow_path_mutex);
>   }
>  
> @@ -1287,7 +1278,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>   update_odp_mr(mr);
>  #endif
>  
> - if (!populate_mtts) {
> + if (use_umr) {
>   int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
>  
>   if (access_flags & IB_ACCESS_ON_DEMAND)
> --
> 2.20.1
>
>
> --
> kernel-team mailing list
> [hidden email]
> https://lists.ubuntu.com/mailman/listinfo/kernel-team

--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team


APPLIED: [disco:azure][PATCH] IB/mlx5: Fix MR registration flow to use UMR properly

Khaled Elmously
In reply to this post by Marcelo Henrique Cerri
On 2019-08-22 12:25:57, Marcelo Henrique Cerri wrote:

> From: Guy Levi <[hidden email]>
>
> BugLink: https://bugs.launchpad.net/bugs/1840189
>
> The driver shouldn't allow UMR to be used to register an MR when
> umr_modify_atomic_disabled is set. Otherwise it will always fail in the
> post-send flow, which sets the UMR WQE to modify atomic access rights.
>
> Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
> Signed-off-by: Guy Levi <[hidden email]>
> Reviewed-by: Moni Shoua <[hidden email]>
> Signed-off-by: Leon Romanovsky <[hidden email]>
> Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@...
> Signed-off-by: Doug Ledford <[hidden email]>
> (cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
> Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
> ---
>  drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
>  1 file changed, 9 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
> index c2484cc9bc2f..c9ba5c9a5531 100644
> --- a/drivers/infiniband/hw/mlx5/mr.c
> +++ b/drivers/infiniband/hw/mlx5/mr.c
> @@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
>  static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
>  static int mr_cache_max_order(struct mlx5_ib_dev *dev);
>  static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
> -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
> -{
> - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
> -}
>  
>  static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
>  {
>   return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
>  }
>  
> -static bool use_umr(struct mlx5_ib_dev *dev, int order)
> -{
> - return order <= mr_cache_max_order(dev) &&
> - umr_can_modify_entity_size(dev);
> -}
> -
>  static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
>  {
>   int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
> @@ -1321,7 +1311,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>  {
>   struct mlx5_ib_dev *dev = to_mdev(pd->device);
>   struct mlx5_ib_mr *mr = NULL;
> - bool populate_mtts = false;
> + bool use_umr;
>   struct ib_umem *umem;
>   int page_shift;
>   int npages;
> @@ -1354,29 +1344,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>   if (err < 0)
>   return ERR_PTR(err);
>  
> - if (use_umr(dev, order)) {
> + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
> +  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
> +   !MLX5_CAP_GEN(dev->mdev, atomic));
> +
> + if (order <= mr_cache_max_order(dev) && use_umr) {
>   mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
>   page_shift, order, access_flags);
>   if (PTR_ERR(mr) == -EAGAIN) {
>   mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
>   mr = NULL;
>   }
> - populate_mtts = false;
>   } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
>   if (access_flags & IB_ACCESS_ON_DEMAND) {
>   err = -EINVAL;
>   pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
>   goto error;
>   }
> - populate_mtts = true;
> + use_umr = false;
>   }
>  
>   if (!mr) {
> - if (!umr_can_modify_entity_size(dev))
> - populate_mtts = true;
>   mutex_lock(&dev->slow_path_mutex);
>   mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
> - page_shift, access_flags, populate_mtts);
> + page_shift, access_flags, !use_umr);
>   mutex_unlock(&dev->slow_path_mutex);
>   }
>  
> @@ -1394,7 +1385,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
>   update_odp_mr(mr);
>  #endif
>  
> - if (!populate_mtts) {
> + if (use_umr) {
>   int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
>  
>   if (access_flags & IB_ACCESS_ON_DEMAND)
> --
> 2.20.1
>
>
> --
> kernel-team mailing list
> [hidden email]
> https://lists.ubuntu.com/mailman/listinfo/kernel-team

--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team