/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

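/* Illustrative sketch (not part of the original header; values assume the
 * common default of MAX_SKB_FRAGS == 17, i.e. NR_MSG_FRAG_IDS == 18):
 * sg.start/sg.end index data[] as a ring, so a message may wrap, e.g.
 * start == 16 and end == 2 occupies data[16], data[17], data[0], data[1].
 * The two spare elements beyond the ring give crypto users a slot to chain
 * the front part back to the tail part into one logical scatterlist, and a
 * slot for trailer entries, as the comment above describes.
 */
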
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
	struct bpf_link			*msg_parser_link;
	struct bpf_link			*stream_parser_link;
	struct bpf_link			*stream_verdict_link;
	struct bpf_link			*skb_verdict_link;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	bool				redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	/* psock_update_sk_prot may be called with restore=false many times
	 * so the handler must be safe for this case. It will be called
	 * exactly once with restore=true when the psock is being destroyed
	 * and psock refcnt is zero, but before an RCU grace period.
	 */
	int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				    bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct delayed_work		work;
	struct sock			*sk_pair;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start :
			      end + (NR_MSG_FRAG_IDS - start);
}

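/* Worked example (illustrative only; assumes the common MAX_SKB_FRAGS of 17,
 * i.e. NR_MSG_FRAG_IDS == 18): in the non-wrapped case sk_msg_iter_dist(3, 7)
 * is simply 7 - 3 == 4. When the ring wraps, say start == 16 and end == 2,
 * the occupied slots are 16, 17, 0 and 1, and
 * sk_msg_iter_dist(16, 2) == 2 + (18 - 16) == 4.
 */
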
#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

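/* Note on the full/empty distinction (a sketch based on the helpers above,
 * not text from the original header): because NR_MSG_FRAG_IDS is
 * MAX_MSG_FRAGS + 1, one slot id is always left unused, so start == end can
 * only mean "empty" and sk_msg_iter_dist() == MAX_MSG_FRAGS can only mean
 * "full"; sk_msg_full() relies on exactly that invariant.
 */
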
static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

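/* Rough usage sketch (illustrative, not from the original header): after
 * sk_msg_compute_data_pointers(), a BPF SK_MSG verdict program can read the
 * first sg element directly through the data/data_end window exposed via
 * struct sk_msg_md; when the element is marked in sg.copy, data is NULL and
 * the program is expected to pull data in (e.g. with bpf_msg_pull_data())
 * before doing direct packet accesses.
 */
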
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

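/* Minimal usage sketch (illustrative only; error handling and memory charging
 * via sk_msg_alloc()/sk_mem_charge() are omitted, and "page"/"len" are
 * placeholders): append one page worth of data to a message and refresh the
 * direct-access pointers.
 *
 *	struct sk_msg msg;
 *
 *	sk_msg_init(&msg);
 *	sk_msg_page_add(&msg, page, len, 0);
 *	sk_msg_compute_data_pointers(&msg);
 */
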
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

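/* Usage sketch (illustrative only; the real receive path is sk_msg_recvmsg()):
 * the ingress side queues parsed messages with sk_psock_queue_msg() under
 * ingress_lock, and a consumer drains them roughly like this:
 *
 *	struct sk_msg *msg;
 *
 *	while ((msg = sk_psock_dequeue_msg(psock))) {
 *		// copy msg->sg data out, then release the message
 *		sk_msg_free(psock->sk, msg);
 *		kfree(msg);
 *	}
 */
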
static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

/*
 * This specialized allocator has to be a macro for its allocations to be
 * accounted separately (to have a separate alloc_tag). The typecast is
 * intentional to enforce typesafety.
 */
#define sk_psock_init_link()	\
		((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link),	\
						 GFP_ATOMIC | __GFP_NOWARN))

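/* Usage sketch (illustrative only): allocate a link with the tagged macro and
 * release it with the matching helper below.
 *
 *	struct sk_psock_link *link = sk_psock_init_link();
 *
 *	if (!link)
 *		return -ENOMEM;
 *	...
 *	sk_psock_free_link(link);
 */
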
static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

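/* Reference-counting sketch (illustrative only): a caller that needs the
 * psock outside an RCU read-side section takes a reference and drops it when
 * done.
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (!psock)
 *		return;		// no psock attached, or it is going away
 *	...
 *	sk_psock_put(sk, psock);
 */
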
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	read_lock_bh(&sk->sk_callback_lock);
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

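/* Pointer-tagging sketch (illustrative only, assuming BPF_F_INGRESS is bit 0
 * as defined in the BPF UAPI): struct sock pointers are word aligned, so the
 * low bits of skb->_sk_redir can carry the INGRESS and STRPARSER flags while
 * the remaining bits hold the redirect socket pointer, e.g.:
 *
 *	skb->_sk_redir = (unsigned long)sk | BPF_F_INGRESS;
 *	sk = (struct sock *)(skb->_sk_redir & BPF_F_PTR_MASK);
 */
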
static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */