net/packet/internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>
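
/* One membership added with the PACKET_ADD_MEMBERSHIP socket option. */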
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char	*pkblk_start;
	char	*pkblk_end;
	int	kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char	*prev;
	char	*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
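
/* One entry of a ring's page vector: the start of an allocated block of pages. */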
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;
};

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256
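
/* A fanout group: up to PACKET_FANOUT_MAX member sockets share one protocol
 * hook, and incoming packets are distributed among them according to the
 * fanout type.
 */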
struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	refcount_t		sk_ref;

	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
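
/* Per-socket rollover state and statistics (PACKET_FANOUT_FLAG_ROLLOVER). */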
struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
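
/* struct sock is the first member of struct packet_sock, so this cast is valid. */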
static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif