/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

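/*
 * Firmware page allocator.  The device firmware keeps much of its state in
 * host memory: it asks the driver for pages through the QUERY_PAGES command
 * and page-request events, and the driver supplies or reclaims them through
 * MANAGE_PAGES.  Every page handed to firmware is tracked here so it can be
 * reclaimed on teardown.  The opmod values below select the MANAGE_PAGES
 * operation and the QUERY_PAGES flavor.
 */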
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

enum {
	MLX5_BOOT_PAGES		= 1,
	MLX5_INIT_PAGES		= 2,
	MLX5_POST_INIT_PAGES	= 3
};

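/*
 * One mlx5_pages_req is queued per firmware page-request event.  One fw_page
 * tracks a single DMA-mapped host page that is carved into
 * MLX5_ADAPTER_PAGE_SIZE (4K) chunks, with the bitmask recording which
 * chunks are still available.
 */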
struct mlx5_pages_req {
	struct mlx5_core_dev	*dev;
	u16			func_id;
	s32			npages;
	struct work_struct	work;
};

struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page		*page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

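/*
 * Command mailbox layouts for QUERY_PAGES and MANAGE_PAGES.  The pas[]
 * flexible arrays carry big-endian page addresses; the reserved fields are
 * padding per the device command ABI.
 */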
struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_pages;
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;
	u8			rsvd[4];
	__be64			pas[0];
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

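/*
 * Track a newly mapped host page in the per-device rb-tree (keyed by DMA
 * address) and put it on the free list with all of its 4K chunks marked
 * available in the bitmask.
 */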
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

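/* Look up the fw_page that tracks a given DMA address in the rb-tree. */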
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

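/*
 * Ask firmware how many pages it needs for this function (QUERY_PAGES);
 * opmod distinguishes boot-time from init-time requirements.
 */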
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	struct mlx5_query_pages_inbox	in;
	struct mlx5_query_pages_outbox	out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	*npages = be32_to_cpu(out.num_pages);
	*func_id = be16_to_cpu(out.func_id);

	return err;
}

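/*
 * Hand out one 4K chunk from the first page on the free list; the page is
 * taken off the list once its last chunk is in use.
 */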
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

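/*
 * Return a 4K chunk to its tracking page.  Once every chunk of the page is
 * free again, the page is unmapped, released and dropped from the rb-tree;
 * when its first chunk frees up, the page goes back on the free list.
 */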
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

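/*
 * Allocate one host page (preferring the device's NUMA node), DMA-map it
 * and start tracking it; undo both steps on failure.
 */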
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;
	int nid = dev_to_node(&dev->pdev->dev);

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);

	return err;
}

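/*
 * Tell firmware we could not supply the pages it asked for
 * (MANAGE_PAGES with the CANT_GIVE opmod).
 */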
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
	in->func_id = cpu_to_be16(func_id);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (!err)
		err = mlx5_cmd_status_to_err(&out.hdr);

	if (err)
		mlx5_core_warn(dev, "page notify failed\n");

	kfree(in);
}

static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	int inlen;
	u64 addr;
	int err;
	int i;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}
	dev->priv.fw_pages += npages;

	err = mlx5_cmd_status_to_err(&out.hdr);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
			       func_id, npages, out.hdr.status);
		goto out_4k;
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id);
	return err;
}

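/*
 * Ask firmware to return up to npages pages (MANAGE_PAGES/TAKE) and free
 * the 4K chunks it reports back; nclaimed, if provided, returns how many
 * were actually reclaimed.
 */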
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox   in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}
	dev->priv.fw_pages -= npages;

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

out_free:
	kvfree(out);
	return err;
}

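/*
 * Deferred work for firmware page-request events: a negative npages asks
 * the driver to reclaim pages, a positive one to give pages.
 */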
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

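/*
 * Queue a page request for the allocator workqueue; GFP_ATOMIC because this
 * is called from the page-request event path.
 */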
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}

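/*
 * Give firmware its boot- or init-time page budget, as reported by
 * QUERY_PAGES, during device initialization.
 */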
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

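/*
 * How many page addresses fit in one reclaim response, given the command
 * layout's inline out area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */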
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}

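/*
 * Reclaim every page still held by firmware at teardown, batching requests
 * and giving up after MAX_RECLAIM_TIME_MSECS without progress.  In the
 * internal-error state the pages are simply freed locally, since firmware
 * cannot respond.
 */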
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
				free_4k(dev, fwp->addr);
				nclaimed = 1;
			} else {
				err = reclaim_pages(dev, fwp->func_id,
						    optimal_reclaimed_pages(),
						    &nclaimed);
			}
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing to do */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}