/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_RPS_H
#define _NET_RPS_H

#include <linux/types.h>
#include <linux/static_key.h>
#include <net/sock.h>
#include <net/hotdata.h>

#ifdef CONFIG_RPS

extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int	len;
	struct rcu_head	rcu;
	u16		cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
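
/*
 * Illustrative sketch only (not part of this header): how a variable-length
 * rps_map could be sized with RPS_MAP_SIZE(). The real update path is the
 * per-queue rps_cpus sysfs handler in net/core/net-sysfs.c; RCU publication
 * and error handling are omitted here.
 *
 *	struct rps_map *map;
 *
 *	map = kzalloc(RPS_MAP_SIZE(2), GFP_KERNEL);
 *	if (map) {
 *		map->len = 2;
 *		map->cpus[0] = 1;
 *		map->cpus[1] = 3;
 *	}
 */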
/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16		cpu;
	u16		filter;
	unsigned int	last_qtail;
};
#define RPS_NO_FILTER 0xffff
/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int		mask;
	struct rcu_head		rcu;
	struct rps_dev_flow	flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))
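
/*
 * Illustrative sketch only: a per-queue flow table with @n entries (a power
 * of two) is sized with RPS_DEV_FLOW_TABLE_SIZE(n) and indexed through
 * "hash & table->mask". Simplified from the rps_flow_cnt sysfs handler;
 * allocation strategy and RCU publication are omitted.
 *
 *	table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(n));
 *	if (table) {
 *		table->mask = n - 1;
 *		for (i = 0; i < n; i++)
 *			table->flows[i].cpu = RPS_NO_CPU;
 *	}
 */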
/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff
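
/*
 * Worked example of the entry layout described above, assuming 64 possible
 * CPUs, i.e. net_hotdata.rps_cpu_mask == 0x3f:
 *
 *	flow hash = 0x9ad3c7b2, recorded on CPU 5
 *	ents[hash & mask] = (0x9ad3c7b2 & ~0x3f) | 5 = 0x9ad3c785
 *
 * The low 6 bits carry the CPU hint, the upper 26 bits identify the flow.
 */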
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	unsigned int index = hash & table->mask;
	u32 val = hash & ~net_hotdata.rps_cpu_mask;

	/* We only give a hint, preemption can change CPU under us */
	val |= raw_smp_processor_id();

	/* The following WRITE_ONCE() is paired with the READ_ONCE()
	 * here, and another one in get_rps_cpu().
	 */
	if (READ_ONCE(table->ents[index]) != val)
		WRITE_ONCE(table->ents[index], val);
}
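
/*
 * For reference, a simplified sketch of the consumer side in get_rps_cpu()
 * (net/core/dev.c); exact details may differ, this only illustrates how the
 * hint written above is validated before use:
 *
 *	ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
 *	if (!((ident ^ hash) & ~net_hotdata.rps_cpu_mask))
 *		next_cpu = ident & net_hotdata.rps_cpu_mask;
 *
 * i.e. the stored high-order hash bits must still match the packet's hash.
 */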
#endif /* CONFIG_RPS */

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	if (!hash)
		return;
	rcu_read_lock();
	sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
	if (sock_flow_table)
		rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED) {
			/* This READ_ONCE() is paired with the WRITE_ONCE()
			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
			 */
			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
		}
	}
#endif
}
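
/*
 * Typical (illustrative) call site: the receive path records the flow when
 * the application reads from the socket, e.g. inet_recvmsg() does roughly:
 *
 *	sock_rps_record_flow(sk);
 *
 * before calling the protocol's recvmsg; this is what keeps
 * rps_sock_flow_table up to date for RFS.
 */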
static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return ++sd->input_queue_tail;
#else
	return 0;
#endif
}
static inline void rps_input_queue_tail_save(u32 *dest, u32 tail)
{
#ifdef CONFIG_RPS
	WRITE_ONCE(*dest, tail);
#endif
}
static inline void rps_input_queue_head_add(struct softnet_data *sd, int val)
{
#ifdef CONFIG_RPS
	WRITE_ONCE(sd->input_queue_head, sd->input_queue_head + val);
#endif
}
static inline void rps_input_queue_head_incr(struct softnet_data *sd)
{
	rps_input_queue_head_add(sd, 1);
}
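
/*
 * Informal note on the two counters (no new API implied): enqueue paths
 * record the value returned by rps_input_queue_tail_incr() into a flow's
 * last_qtail, while packet processing advances input_queue_head. RFS only
 * migrates a flow to a new CPU once the old CPU's head counter has passed
 * the recorded tail, so packets already queued cannot be reordered.
 */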
#endif /* _NET_RPS_H */