drivers/infiniband/hw/mana/mr.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
4 */
6 #include "mana_ib.h"
/* IB access flags that can be translated to GDMA MR access flags.
 * mana_ib_reg_user_mr() rejects a registration carrying any other bit
 * (after masking off IB_ACCESS_OPTIONAL) with -EINVAL.
 */
#define VALID_MR_FLAGS \
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
11 static enum gdma_mr_access_flags
12 mana_ib_verbs_to_gdma_access_flags(int access_flags)
14 enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;
16 if (access_flags & IB_ACCESS_LOCAL_WRITE)
17 flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;
19 if (access_flags & IB_ACCESS_REMOTE_WRITE)
20 flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;
22 if (access_flags & IB_ACCESS_REMOTE_READ)
23 flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
25 return flags;
28 static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
29 struct gdma_create_mr_params *mr_params)
31 struct gdma_create_mr_response resp = {};
32 struct gdma_create_mr_request req = {};
33 struct gdma_context *gc = mdev_to_gc(dev);
34 int err;
36 mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
37 sizeof(resp));
38 req.pd_handle = mr_params->pd_handle;
39 req.mr_type = mr_params->mr_type;
41 switch (mr_params->mr_type) {
42 case GDMA_MR_TYPE_GVA:
43 req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
44 req.gva.virtual_address = mr_params->gva.virtual_address;
45 req.gva.access_flags = mr_params->gva.access_flags;
46 break;
48 default:
49 ibdev_dbg(&dev->ib_dev,
50 "invalid param (GDMA_MR_TYPE) passed, type %d\n",
51 req.mr_type);
52 return -EINVAL;
55 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
57 if (err || resp.hdr.status) {
58 ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
59 resp.hdr.status);
60 if (!err)
61 err = -EPROTO;
63 return err;
66 mr->ibmr.lkey = resp.lkey;
67 mr->ibmr.rkey = resp.rkey;
68 mr->mr_handle = resp.mr_handle;
70 return 0;
73 static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
75 struct gdma_destroy_mr_response resp = {};
76 struct gdma_destroy_mr_request req = {};
77 struct gdma_context *gc = mdev_to_gc(dev);
78 int err;
80 mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
81 sizeof(resp));
83 req.mr_handle = mr_handle;
85 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
86 if (err || resp.hdr.status) {
87 dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
88 resp.hdr.status);
89 if (!err)
90 err = -EPROTO;
91 return err;
94 return 0;
97 struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
98 u64 iova, int access_flags,
99 struct ib_udata *udata)
101 struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
102 struct gdma_create_mr_params mr_params = {};
103 struct ib_device *ibdev = ibpd->device;
104 struct mana_ib_dev *dev;
105 struct mana_ib_mr *mr;
106 u64 dma_region_handle;
107 int err;
109 dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
111 ibdev_dbg(ibdev,
112 "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
113 start, iova, length, access_flags);
115 access_flags &= ~IB_ACCESS_OPTIONAL;
116 if (access_flags & ~VALID_MR_FLAGS)
117 return ERR_PTR(-EINVAL);
119 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
120 if (!mr)
121 return ERR_PTR(-ENOMEM);
123 mr->umem = ib_umem_get(ibdev, start, length, access_flags);
124 if (IS_ERR(mr->umem)) {
125 err = PTR_ERR(mr->umem);
126 ibdev_dbg(ibdev,
127 "Failed to get umem for register user-mr, %d\n", err);
128 goto err_free;
131 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
132 if (err) {
133 ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
134 err);
135 goto err_umem;
138 ibdev_dbg(ibdev,
139 "created dma region for user-mr 0x%llx\n",
140 dma_region_handle);
142 mr_params.pd_handle = pd->pd_handle;
143 mr_params.mr_type = GDMA_MR_TYPE_GVA;
144 mr_params.gva.dma_region_handle = dma_region_handle;
145 mr_params.gva.virtual_address = iova;
146 mr_params.gva.access_flags =
147 mana_ib_verbs_to_gdma_access_flags(access_flags);
149 err = mana_ib_gd_create_mr(dev, mr, &mr_params);
150 if (err)
151 goto err_dma_region;
154 * There is no need to keep track of dma_region_handle after MR is
155 * successfully created. The dma_region_handle is tracked in the PF
156 * as part of the lifecycle of this MR.
159 return &mr->ibmr;
161 err_dma_region:
162 mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
164 err_umem:
165 ib_umem_release(mr->umem);
167 err_free:
168 kfree(mr);
169 return ERR_PTR(err);
172 int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
174 struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
175 struct ib_device *ibdev = ibmr->device;
176 struct mana_ib_dev *dev;
177 int err;
179 dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
181 err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
182 if (err)
183 return err;
185 if (mr->umem)
186 ib_umem_release(mr->umem);
188 kfree(mr);
190 return 0;