/* SPDX-License-Identifier: GPL-2.0
 *
 *	Network memory
 *
 *	Author:	Mina Almasry <almasrymina@google.com>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/mm.h>
#include <net/net_debug.h>

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/* We overload the LSB of the struct page pointer to indicate whether it's
 * a page or net_iov.
 */
#define NET_IOV 0x01UL
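
/* Illustrative sketch (not part of the API): struct page pointers are at
 * least word-aligned, so their low bit is always clear. Tagging a net_iov
 * pointer with NET_IOV and masking the bit back out is therefore lossless:
 *
 *        netmem_ref ref = net_iov_to_netmem(niov);
 *                // == (__force netmem_ref)((unsigned long)niov | NET_IOV)
 *        struct net_iov *back = __netmem_clear_lsb(ref);
 *                // back == niov
 *
 * Both helpers are defined later in this header.
 */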

struct net_iov {
        unsigned long __unused_padding;
        unsigned long pp_magic;
        struct page_pool *pp;
        struct dmabuf_genpool_chunk_owner *owner;
        unsigned long dma_addr;
        atomic_long_t pp_ref_count;
};

/* These fields in struct page are used by the page_pool and net stack:
 *
 *        struct {
 *                unsigned long pp_magic;
 *                struct page_pool *pp;
 *                unsigned long _pp_mapping_pad;
 *                unsigned long dma_addr;
 *                atomic_long_t pp_ref_count;
 *        };
 *
 * We mirror the page_pool fields here so the page_pool can access these fields
 * without worrying whether the underlying fields belong to a page or net_iov.
 *
 * The non-net stack fields of struct page are private to the mm stack and must
 * never be mirrored to net_iov.
 */

#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
        static_assert(offsetof(struct page, pg) == \
                      offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
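
/* For example, the first assertion above expands (roughly) to
 *
 *        static_assert(offsetof(struct page, pp_magic) ==
 *                      offsetof(struct net_iov, pp_magic));
 *
 * so a struct page layout change that moves a mirrored field breaks the build
 * instead of silently breaking the shared accessors below.
 */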

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref currently is always a reference to a struct page. This
 * abstraction is introduced so support for new memory types can be added.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;
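
/* Usage sketch (illustrative only, not a prescribed calling convention):
 *
 *        netmem_ref nm = page_to_netmem(page);
 *
 *        if (!netmem_is_net_iov(nm))
 *                kaddr = netmem_address(nm);     // page-backed: has a mapping
 *        refs = netmem_ref_count(nm);            // works for either backing
 *
 * Code that may also see net_iov-backed netmem should stay with the generic
 * helpers below (netmem_get_pp(), netmem_get_dma_addr(), ...) rather than
 * converting back to struct page.
 */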

static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
        return (__force unsigned long)netmem & NET_IOV;
}

/* This conversion fails (returns NULL) if the netmem_ref is not struct page
 * backed.
 */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
        if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
                return NULL;

        return (__force struct page *)netmem;
}

static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
        if (netmem_is_net_iov(netmem))
                return (struct net_iov *)((__force unsigned long)netmem &
                                          ~NET_IOV);

        DEBUG_NET_WARN_ON_ONCE(true);
        return NULL;
}

static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
        return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

static inline netmem_ref page_to_netmem(struct page *page)
{
        return (__force netmem_ref)page;
}

static inline int netmem_ref_count(netmem_ref netmem)
{
        /* The non-pp refcount of net_iov is always 1. On net_iov, we only
         * support pp refcounting which uses the pp_ref_count field.
         */
        if (netmem_is_net_iov(netmem))
                return 1;

        return page_ref_count(netmem_to_page(netmem));
}
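
/* Intended for tracing (as the _trace suffix suggests): net_iov-backed netmem
 * has no struct page and hence no pfn, so 0 is reported for it.
 */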

static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
        if (netmem_is_net_iov(netmem))
                return 0;

        return page_to_pfn(netmem_to_page(netmem));
}
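
/* Strip the NET_IOV tag bit. The mirrored page_pool fields sit at the same
 * offsets in struct page and struct net_iov (see the static_asserts above),
 * so the result can be dereferenced for those fields regardless of what the
 * netmem_ref actually refers to.
 */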

static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
        return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}

static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
        return __netmem_clear_lsb(netmem)->pp;
}

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
        return &__netmem_clear_lsb(netmem)->pp_ref_count;
}

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
        /* NUMA node preference only makes sense if we're allocating
         * system memory. Memory providers (which give us net_iovs)
         * choose for us.
         */
        if (netmem_is_net_iov(netmem))
                return true;

        return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
        /* niov are never compounded */
        if (netmem_is_net_iov(netmem))
                return netmem;

        return page_to_netmem(compound_head(netmem_to_page(netmem)));
}
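
/* Kernel virtual address of page-backed netmem; net_iov-backed memory is not
 * assumed to have a kernel mapping here, so NULL is returned for it.
 */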

static inline void *netmem_address(netmem_ref netmem)
{
        if (netmem_is_net_iov(netmem))
                return NULL;

        return page_address(netmem_to_page(netmem));
}

static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
        return __netmem_clear_lsb(netmem)->dma_addr;
}

#endif /* _NET_NETMEM_H */