/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include "hns_roce_device.h"

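/*
 * PD, UAR and XRCD numbers are handed out from per-resource IDA allocators.
 * Each allocator is seeded with the reserved count as its minimum and the
 * capability limit as its maximum, so reserved indexes are never returned
 * to callers.
 */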
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;

        ida_init(&pd_ida->ida);
        pd_ida->max = hr_dev->caps.num_pds - 1;
        pd_ida->min = hr_dev->caps.reserved_pds;
}

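/*
 * Allocate a protection domain number from the PD IDA. For a userspace
 * caller the chosen PDN is also copied back through udata; if that copy
 * fails, the number is returned to the IDA.
 */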
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ib_dev = ibpd->device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
        struct hns_roce_pd *pd = to_hr_pd(ibpd);
        int ret = 0;
        int id;

        id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
                             GFP_NOWAIT);
        if (id < 0) {
                ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
                return -ENOMEM;
        }
        pd->pdn = (unsigned long)id;

        if (udata) {
                struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};

                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret) {
                        ida_free(&pd_ida->ida, id);
                        ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
                }
        }

        return ret;
}

int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);

        ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);

        return 0;
}

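/*
 * Allocate a UAR: pick a logical index from the UAR IDA, fold it onto the
 * available physical UAR pages, and record the doorbell PFN from BAR 2
 * (plus the direct WQE region from BAR 4 when that capability is set).
 */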
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
        struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
        int id;

        /* Use the IDA to manage the UAR index */
        id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
                             GFP_NOWAIT);
        if (id < 0) {
                ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
                return -ENOMEM;
        }
        uar->logic_idx = (unsigned long)id;

        if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
                uar->index = (uar->logic_idx - 1) %
                             (hr_dev->caps.phy_num_uars - 1) + 1;
        else
                uar->index = 0;

        uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
                hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);

        return 0;
}

void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;

        ida_init(&uar_ida->ida);
        uar_ida->max = hr_dev->caps.num_uars - 1;
        uar_ida->min = hr_dev->caps.reserved_uars;
}

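/* Allocate an XRC domain number from the XRCD IDA. */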
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
{
        struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
        int id;

        id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
                             GFP_NOWAIT);
        if (id < 0) {
                ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
                return -ENOMEM;
        }
        *xrcdn = (u32)id;

        return 0;
}

void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;

        ida_init(&xrcd_ida->ida);
        xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
        xrcd_ida->min = hr_dev->caps.reserved_xrcds;
}

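/*
 * Verb entry for XRCD allocation: reject the request if the hardware does
 * not advertise XRC support, otherwise take a number from the XRCD IDA and
 * bump the driver's software error counter on failure.
 */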
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
        struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
        int ret;

        if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) {
                ret = -EOPNOTSUPP;
                goto err_out;
        }

        ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);

err_out:
        if (ret)
                atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT]);

        return ret;
}

int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
        u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;

        ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);

        return 0;
}