/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 Intel Corporation.
 */

#ifndef _HFI1_MMU_RB_H
#define _HFI1_MMU_RB_H

#include "hfi.h"

struct mmu_rb_node {
	unsigned long addr;
	unsigned long len;
	unsigned long __last;		/* interval tree bookkeeping */
	struct rb_node node;
	struct mmu_rb_handler *handler;
	struct list_head list;		/* entry on the handler's LRU/del lists */
	struct kref refcount;
};

/* filter and evict must not sleep. Only remove is allowed to sleep. */
struct mmu_rb_ops {
	bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
		       unsigned long len);
	void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
	int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
		     void *evict_arg, bool *stop);
};
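
/*
 * A hedged sketch of a client ops table (the my_* names are
 * hypothetical, for illustration only). filter() and evict() keep to
 * non-sleeping work per the rule above; in the existing hfi1 clients a
 * nonzero return from evict() marks the node for eviction, and setting
 * *stop ends the LRU walk early.
 *
 *	static bool my_filter(struct mmu_rb_node *node, unsigned long addr,
 *			      unsigned long len)
 *	{
 *		return node->addr == addr && node->len == len;
 *	}
 *
 *	static void my_remove(void *ops_arg, struct mmu_rb_node *mnode)
 *	{
 *		kfree(mnode);
 *	}
 *
 *	static int my_evict(void *ops_arg, struct mmu_rb_node *mnode,
 *			    void *evict_arg, bool *stop)
 *	{
 *		*stop = false;
 *		return 1;
 *	}
 *
 *	static const struct mmu_rb_ops my_ops = {
 *		.filter = my_filter,
 *		.remove = my_remove,
 *		.evict = my_evict,
 *	};
 */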

struct mmu_rb_handler {
	/*
	 * struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so
	 * they fit together in one cache line. mn is relatively rarely
	 * accessed, so co-locating the spinlock with it achieves much of
	 * the cacheline contention reduction of giving the spinlock its
	 * own cacheline without the overhead of doing so.
	 */
	struct mmu_notifier mn;
	spinlock_t lock;	/* protect the RB tree */

	/* Begin on a new cacheline boundary here */
	struct rb_root_cached root ____cacheline_aligned_in_smp;

	/* Access to the fields below is protected by lock */
	void *ops_arg;
	const struct mmu_rb_ops *ops;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};
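
/*
 * A hedged illustration of the sizing comment above: with a 64-byte
 * cacheline and a debug-free spinlock_t, the co-location could be
 * asserted as
 *
 *	BUILD_BUG_ON(sizeof(struct mmu_notifier) + sizeof(spinlock_t) >
 *		     SMP_CACHE_BYTES);
 *
 * Both sizes grow under debug configs (e.g. lockdep), so the 56 + 4
 * arithmetic holds only for typical production builds.
 */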

int hfi1_mmu_rb_register(void *ops_arg,
			 const struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler);
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode);
void hfi1_mmu_rb_release(struct kref *refcount);

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr,
					  unsigned long len);
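
/*
 * Typical call flow (a hedged sketch; my_ctx, my_wq and my_ops are
 * hypothetical, and error paths are elided):
 *
 *	struct mmu_rb_handler *handler;
 *	struct mmu_rb_node *node;
 *	int ret;
 *
 *	ret = hfi1_mmu_rb_register(my_ctx, &my_ops, my_wq, &handler);
 *	if (ret)
 *		return ret;
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	node->addr = vaddr;
 *	node->len = length;
 *	ret = hfi1_mmu_rb_insert(handler, node);
 *
 *	hfi1_mmu_rb_unregister(handler);
 */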

#endif /* _HFI1_MMU_RB_H */