// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#endif /* CONFIG_XFRM */

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-util.h"

static atomic_t oct_rx_ready = ATOMIC_INIT(0);
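
/* Per-receive-group RX state: one NAPI context and one IRQ per POW group. */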
static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
		return 0;

	if (work->word2.snoip.err_code == 5 ||
	    work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
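		/*
		 * In that case the buffer may still begin with 0x55
		 * preamble bytes followed by the 0xd5 start-of-frame
		 * delimiter, or the whole frame may be shifted by one
		 * nibble. The code below skips the 0x55 bytes and, in
		 * the shifted case, rebuilds each byte from the high
		 * nibble of one received byte and the low nibble of
		 * the next.
		 */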
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip any 0x55 preamble bytes */
			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
				return 0;
			}

			if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
				return 0;
			}

			printk_ratelimited("Port %d unknown preamble, packet dropped\n",
					   port);
			cvm_oct_free_work(work);
			return 1;
		}
	}

	printk_ratelimited("Port %d receive error code %d, packet dropped\n",
			   port, work->word2.snoip.err_code);
	cvm_oct_free_work(work);
	return 1;
}
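
/**
 * copy_segments_to_skb - copy a multi-buffer packet into a socket buffer.
 * @work: Work queue entry holding the received packet.
 * @skb: Socket buffer large enough to hold the whole packet.
 *
 * Walks the chain of FPA packet buffers referenced by @work and copies
 * each segment into @skb until the full packet length has been copied.
 */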
static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
{
	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
	int len = work->word1.len;
	int segment_size;

	while (segments--) {
		union cvmx_buf_ptr next_ptr;

		next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		/*
		 * Octeon Errata PKI-100: The segment size is wrong.
		 *
		 * Until it is fixed, calculate the segment size based on
		 * the packet pool buffer size.
		 * When it is fixed, the following line should be replaced
		 * with this one:
		 * int segment_size = segment_ptr.s.size;
		 */
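		/*
		 * segment_ptr.s.back counts 128-byte cache lines from
		 * the data address back to the start of the pool
		 * buffer, so the expression below is the pool buffer
		 * size minus the data offset into that buffer.
		 */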
		segment_size =
			CVMX_FPA_PACKET_POOL_SIZE -
			(segment_ptr.s.addr -
			 (((segment_ptr.s.addr >> 7) -
			   segment_ptr.s.back) << 7));

		/* Don't copy more than what is left in the packet */
		if (segment_size > len)
			segment_size = len;

		/* Copy the data into the packet */
		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
			     segment_size);
		len -= segment_size;
		segment_ptr = next_ptr;
	}
}
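
/**
 * cvm_oct_poll - poll the POW/SSO for received packets.
 * @rx_group: Receive group to poll.
 * @budget: Maximum number of packets to process.
 *
 * Pulls work queue entries for this group, turns them into socket
 * buffers (zero-copy when the packet already lives in an FPA skbuff)
 * and hands them to the network stack.
 *
 * Returns the number of packets processed.
 */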
static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		struct cvmx_wqe *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
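
		/*
		 * Buffers seeded into the FPA pool (see ethernet-mem.c)
		 * have the address of their struct sk_buff stored just
		 * before the packet data; the sizeof(void *) offset
		 * below recovers that pointer.
		 */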
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				skb_put_data(skb, ptr, work->word1.len);
				/* No packet buffers to free */
			} else {
				copy_segments_to_skb(work, skb);
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device
				 * that isn't up.
				 */
				dev->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
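		/*
		 * In the zero copy case the FPA packet buffer leaves
		 * with the skb, so only the work queue entry is
		 * returned to its pool and the free counter is bumped
		 * so that cvm_oct_rx_refill_pool() can seed a
		 * replacement skbuff into the packet pool later.
		 */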
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete_done(napi, rx_count);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets without interrupts.
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif
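
/**
 * cvm_oct_rx_initialize - set up receive NAPI instances and IRQs.
 *
 * Registers a NAPI instance and an IRQ handler for every enabled POW
 * receive group and programs the work queue interrupt thresholds so an
 * interrupt fires as soon as a group has work pending.
 */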
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi,
				      cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
	atomic_inc(&oct_rx_ready);
}
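
/**
 * cvm_oct_rx_shutdown - disable receive interrupts and remove the NAPI
 * instances set up by cvm_oct_rx_initialize().
 */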
void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/*
		 * Free the interrupt handler. The cookie must match the
		 * one passed to request_irq() above.
		 */
		free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}