/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
#ifndef _MLX5_IB_UMR_H
#define _MLX5_IB_UMR_H

#include "mlx5_ib.h"

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);

int mlx5r_umr_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev);
static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
					  size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE, which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a
	 * mkey can never be enabled without this capability. Simplify this
	 * weird quirky hardware by just saying it can't use PAS lists with
	 * UMR at all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;

	return true;
}
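/*
 * Illustrative caller sketch (an assumption about usage, not a verbatim
 * excerpt from the driver): a registration path would consult this helper
 * before relying on UMR to load the PAS list, and fall back to a
 * FW-command-based mkey creation otherwise. reg_via_fw_cmd() is a
 * hypothetical helper name used only for illustration.
 *
 *	if (mlx5r_umr_can_load_pas(dev, length))
 *		err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
 *	else
 *		err = reg_via_fw_cmd(dev, mr);
 */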
/*
 * true if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * get_umr_update_access_mask() and umr_check_mkey_mask().
 */
static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
					  unsigned int current_access_flags,
					  unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}
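/*
 * Illustrative rereg sketch (a simplified assumption about how a caller
 * such as mlx5_ib_rereg_user_mr() uses this check, not driver code): when
 * only access flags change, UMR can rewrite the MKC in place; otherwise
 * the MR must be recreated. recreate_mr() is a hypothetical helper name.
 *
 *	if (mlx5r_umr_can_reconfig(dev, mr->access_flags, new_access_flags))
 *		err = mlx5r_umr_rereg_pd_access(mr, pd, new_access_flags);
 *	else
 *		err = recreate_mr(mr, pd, new_access_flags);
 */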
static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}
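/*
 * Worked example: a 60-entry MTT of 8-byte entries is 480 bytes;
 * ALIGN(480, 64) = 512, and 512 / 16 = 32 octowords. Hardware counts UMR
 * translation payloads in 16-byte octowords, padded up to the 64-byte XLT
 * alignment.
 */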
struct mlx5r_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};
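/*
 * Typical wait pattern (a hedged sketch of how the posting code is expected
 * to use this context, not a verbatim copy of the driver): the CQ callback
 * hooked through cqe records the completion status and signals done.
 *
 *	struct mlx5r_umr_context umr_context;
 *
 *	init_completion(&umr_context.done);
 *	// post the UMR WQE with &umr_context.cqe as the completion cookie
 *	wait_for_completion(&umr_context.done);
 *	if (umr_context.status != IB_WC_SUCCESS)
 *		err = -EFAULT;
 */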
struct mlx5r_umr_wqe {
	struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
	struct mlx5_mkey_seg mkey_seg;
	struct mlx5_wqe_data_seg data_seg;
};
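/*
 * Note (descriptive, added for clarity): the segment order above mirrors
 * the UMR-specific portion of the work request as it goes on the wire: a
 * UMR control segment, the mkey context being written, and a data pointer
 * describing the XLT payload.
 */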
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags);
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr,
					 unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags);
#endif /* _MLX5_IB_UMR_H */