/*
 * FUJITSU Extended Socket Network Device driver
 * Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#define DRV_VERSION	__stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME	"fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
		"Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);

static int fjes_open(struct net_device *);
static int fjes_close(struct net_device *);
static int fjes_setup_resources(struct fjes_adapter *);
static void fjes_free_resources(struct fjes_adapter *);
static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void *);
static struct rtnl_link_stats64 *
fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
static void fjes_tx_retry(struct net_device *);

static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void *);

static int fjes_probe(struct platform_device *);
static int fjes_remove(struct platform_device *);

static int fjes_sw_init(struct fjes_adapter *);
static void fjes_netdev_setup(struct net_device *);
static void fjes_irq_watch_task(struct work_struct *);
static void fjes_watch_unshare_task(struct work_struct *);
static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);

static const struct acpi_device_id fjes_acpi_ids[] = {

MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

static struct acpi_driver fjes_acpi_driver = {
	.remove = fjes_acpi_remove,

static struct platform_driver fjes_driver = {
	.remove = fjes_remove,

static struct resource fjes_resource[] = {
	{
		.flags = IORESOURCE_MEM,
	},
	{
		.flags = IORESOURCE_IRQ,
	},
};

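/* fjes_acpi_add - verify the ACPI device via its _STR symbol and register the fjes platform device */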
static int fjes_acpi_add(struct acpi_device *device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	struct platform_device *plat_dev;
	union acpi_object *str;
	acpi_status status;
	int result;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);
		return -ENODEV;
	}
	kfree(buffer.pointer);

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	device->driver_data = plat_dev;

	return 0;
}

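/* fjes_acpi_remove - unregister the platform device created by fjes_acpi_add */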
static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}

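/* fjes_get_acpi_resource - translate ACPI _CRS entries (32bit address / IRQ) into fjes_resource */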
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			     addr->address.address_length - 1;
		break;

	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
			return AE_ERROR;
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
		break;

	default:
		break;
	}

	return AE_OK;
}

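/* fjes_request_irq - start the IRQ watch task and register the shared interrupt handler */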
static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}

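/* fjes_free_irq - stop the IRQ watch task, mask all interrupts and release the IRQ */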
static void fjes_free_irq(struct fjes_adapter *adapter)
{
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
	}
}

static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};

/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}

/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}

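/* fjes_setup_resources - get EP information from the hardware and set up the shared buffer pairs */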
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
		}
	}

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}
		}
	}

	return 0;
}

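/* fjes_free_resources - unregister the shared buffers and reset the hardware if needed */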
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}

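/* fjes_tx_stall_task - wake the TX queue once partner ring buffers drain, or give up after a timeout */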
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}

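/* fjes_force_close_task - forcibly close the network device from process context after a fatal error */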
static void fjes_force_close_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;

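/* fjes_raise_intr_rxdata_task - raise RX data interrupts to partner EPs that have delayed-send frames pending */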
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
			    FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
		}
	}

	usleep_range(500, 1000);
}

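/* fjes_tx_send - put a frame into the destination EP's TX buffer and schedule the RX interrupt task */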
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int retval;

	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					   data, len);
	if (retval)
		return retval;

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	retval = 0;
	return retval;
}

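/* fjes_xmit_frame - called by the network layer to transmit a frame over the shared EP buffers */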
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	netdev_tx_t ret = NETDEV_TX_OK;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	void *data;
	size_t len;
	bool vlan;

	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	if (is_multicast_ether_addr(eth->h_dest)) {
		max_epid = hw->max_epid;

	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {

		} else {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {

		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						  .tx_carrier_errors += 1;

		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;

		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {

		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (((long)jiffies -
				     (long)adapter->tx_start_jiffies) >=
				    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_errors += 1;

				} else {
					netif_trans_update(netdev);
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				adapter->stats64.tx_packets += 1;
				hw->ep_shm_info[dest_epid].net_stats
							  .tx_packets += 1;
				adapter->stats64.tx_bytes += len;
				hw->ep_shm_info[dest_epid].net_stats
							  .tx_bytes += len;

				adapter->tx_retry_count = 0;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	return ret;
}

static void fjes_tx_retry(struct net_device *netdev)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}

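/* fjes_get_stats64 - return the accumulated 64bit statistics */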
static struct rtnl_link_stats64 *
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));

	return stats;
}

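/* fjes_change_mtu - round the requested MTU to a supported size and reconfigure the shared buffers */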
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;
		}
	}

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}
}

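/* fjes_vlan_rx_add_vid / fjes_vlan_rx_kill_vid - propagate VLAN filter changes to every partner EP's TX buffer */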
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ret = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ret ? 0 : -ENOSPC;
}

static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}

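/* fjes_txrx_stop_req_irq - handle a TX/RX stop request interrupt from a partner EP */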
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
}

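/* fjes_stop_req_irq - handle a device stop request interrupt from a partner EP */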
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	switch (status) {
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		/* fall through */
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
}

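/* fjes_update_zone_irq - handle a zone information update interrupt */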
static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
}

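/* fjes_intr - shared interrupt handler; dispatch on the interrupt cause bits */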
static irqreturn_t fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA)
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE)
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}

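/* fjes_rxframe_search_exist - find the next partner EP with a non-empty RX buffer, starting after start_epid */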
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
				     int start_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;
	int i;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
				return cur_epid;
		}
	}

	return -1;
}

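/* fjes_rxframe_get - return the current RX frame (and its size) from the next EP with pending data */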
static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	void *frame;

	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	frame = fjes_hw_epbuf_rx_curpkt_get_addr(
			&adapter->hw.ep_shm_info[*cur_epid].rx, psize);

	return frame;
}

static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}

static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}

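/* fjes_poll - NAPI poll routine; receive frames from the shared buffers into the stack */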
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}

/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	int err;

	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_netdev;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	netdev->dev_addr[0] = 2;
	netdev->dev_addr[1] = 0;
	netdev->dev_addr[2] = 0;
	netdev->dev_addr[3] = 0;
	netdev->dev_addr[4] = 0;
	netdev->dev_addr[5] = hw->my_epid; /* EPID */

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);

	return err;
}

/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}

static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);

	return 0;
}

/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->flags |= IFF_BROADCAST;
	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
}

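/* fjes_irq_watch_task - periodically poll the interrupt status in case an interrupt was missed */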
static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}

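/* fjes_watch_unshare_task - wait for partner EPs to finish unsharing and release their buffers */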
static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);

				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}

			mutex_unlock(&hw->hw_info.lock);

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);

					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}

				mutex_unlock(&hw->hw_info.lock);

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
			}

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
						~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}

/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
	int result;

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	result = platform_driver_register(&fjes_driver);
	if (result < 0)
		return result;

	result = acpi_bus_register_driver(&fjes_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	return result;

fail_acpi_driver:
	platform_driver_unregister(&fjes_driver);
	return result;
}

module_init(fjes_init_module);

/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
}

module_exit(fjes_exit_module);