/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#include "fjes.h"
#include "fjes_trace.h"
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] = "Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);

static int fjes_open(struct net_device *);
static int fjes_close(struct net_device *);
static int fjes_setup_resources(struct fjes_adapter *);
static void fjes_free_resources(struct fjes_adapter *);
static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void *);
static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
static void fjes_tx_retry(struct net_device *);

static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void *);

static int fjes_probe(struct platform_device *);
static int fjes_remove(struct platform_device *);

static int fjes_sw_init(struct fjes_adapter *);
static void fjes_netdev_setup(struct net_device *);
static void fjes_irq_watch_task(struct work_struct *);
static void fjes_watch_unshare_task(struct work_struct *);
static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);
static const struct acpi_device_id fjes_acpi_ids[] = {
	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

static struct acpi_driver fjes_acpi_driver = {
	.remove = fjes_acpi_remove,

static struct platform_driver fjes_driver = {
	.remove = fjes_remove,

static struct resource fjes_resource[] = {
		.flags = IORESOURCE_MEM,
		.flags = IORESOURCE_IRQ,
static bool is_extended_socket_device(struct acpi_device *device)
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	union acpi_object *str;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);

	kfree(buffer.pointer);
static int acpi_check_extended_socket_status(struct acpi_device *device)
	unsigned long long sta;

	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status))

	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
	      (sta & ACPI_STA_DEVICE_ENABLED) &&
	      (sta & ACPI_STA_DEVICE_UI) &&
	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
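
/* fjes_acpi_add - Register a platform device for a detected Extended Socket device */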
static int fjes_acpi_add(struct acpi_device *device)
	struct platform_device *plat_dev;

	if (!is_extended_socket_device(device))

	if (acpi_check_extended_socket_status(device))

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	device->driver_data = plat_dev;
static int fjes_acpi_remove(struct acpi_device *device)
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			addr->address.address_length - 1;
	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
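
/* fjes_request_irq - Register the Extended Socket IRQ handler and start the IRQ watch task */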
static int fjes_request_irq(struct fjes_adapter *adapter)
	struct net_device *netdev = adapter->netdev;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
			adapter->irq_registered = false;
			adapter->irq_registered = true;
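
/* fjes_free_irq - Mask all interrupts and release the registered IRQ */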
static void fjes_free_irq(struct fjes_adapter *adapter)
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	if (adapter->open_guard)

	result = fjes_setup_resources(adapter);

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);
	fjes_free_resources(adapter);
/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
			adapter->hw.ep_shm_info[epidx]
				.tx.info->v1i.rx_status &=
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);
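
/* fjes_setup_resources - Request device info and register shared buffer pairs with peer EPs */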
static int fjes_setup_resources(struct fjes_adapter *adapter)
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
					.send_intr_zoneupdate += 1;

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

				adapter->force_reset = true;

		hw->ep_shm_info[epidx].ep_stats
			.com_regist_buf_exec += 1;
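
/* fjes_free_resources - Unregister shared buffers and reset the hardware if required */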
static void fjes_free_resources(struct fjes_adapter *adapter)
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
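
/* fjes_tx_stall_task - Wake the TX queue once peer receive buffers have space again */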
static void fjes_tx_stall_task(struct work_struct *work)
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;

	    dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;

		if (all_queue_available) {
			netif_wake_queue(netdev);

		usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
static void fjes_force_close_task(struct work_struct *work)
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;
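
/* fjes_raise_intr_rxdata_task - Notify peer EPs that RX data is pending in their buffers */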
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
			    FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;

	for (epid = 0; epid < max_epid; epid++) {
		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;

	usleep_range(500, 1000);
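
/* fjes_tx_send - Queue a frame into the destination EP's shared TX buffer */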
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);
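
/* fjes_xmit_frame - Resolve the destination EP from the MAC address and transmit via shared memory */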
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];

	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	if (is_multicast_ether_addr(eth->h_dest)) {
		max_epid = hw->max_epid;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_not_shared += 1;
		} else if (!fjes_hw_check_epbuf_version(
			&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
				.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_ver_mismatch += 1;
		} else if (!fjes_hw_check_mtu(
			&adapter->hw.ep_shm_info[dest_epid].rx,
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_buf_size_mismatch += 1;
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_vlanid_mismatch += 1;
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
				adapter->tx_retry_count++;

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				     (long)adapter->tx_start_jiffies) >=
				    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
						.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats

					netif_trans_update(netdev);
					hw->ep_shm_info[dest_epid].ep_stats
						.tx_buffer_full += 1;
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				adapter->stats64.tx_packets += 1;
				hw->ep_shm_info[dest_epid].net_stats
				adapter->stats64.tx_bytes += len;
				hw->ep_shm_info[dest_epid].net_stats

				adapter->tx_retry_count = 0;

	if (ret == NETDEV_TX_OK) {
		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
static void fjes_tx_retry(struct net_device *netdev)
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
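
/* fjes_change_mtu - Round the requested MTU up to a supported size and resize the shared buffers */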
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		netif_tx_stop_all_queues(netdev);

	netdev->mtu = new_mtu;

		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
	struct fjes_adapter *adapter = netdev_priv(netdev);

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);

	return ret ? 0 : -ENOSPC;
static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
	struct fjes_adapter *adapter = netdev_priv(netdev);

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
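
/* fjes_txrx_stop_req_irq - Handle a TX/RX stop request interrupt from a peer EP */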
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
			FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
	trace_fjes_stop_req_irq_post(hw, src_epid);
static void fjes_update_zone_irq(struct fjes_adapter *adapter,
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
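
/* fjes_intr - Interrupt handler: dispatch each interrupt cause to its handler */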
static irqreturn_t
fjes_intr(int irq, void *data)
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA) {
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_stop += 1;

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_unshare += 1;

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_zoneupdate += 1;
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
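
/* fjes_rxframe_get - Get the next received frame from a peer's shared RX buffer */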
static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);

		fjes_hw_epbuf_rx_curpkt_get_addr(
			&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
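
/* fjes_poll - NAPI poll handler: deliver frames from the shared RX buffers to the stack */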
static int fjes_poll(struct napi_struct *napi, int budget)
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
			adapter->hw.ep_shm_info[epidx]
				.tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
			skb = napi_alloc_skb(napi, frame_len);
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
					adapter->hw.ep_shm_info[epidx].tx
						.info->v1i.rx_status &=
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;

	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	err = fjes_hw_init(&adapter->hw);
		goto err_free_netdev;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	netdev->dev_addr[0] = 2;
	netdev->dev_addr[1] = 0;
	netdev->dev_addr[2] = 0;
	netdev->dev_addr[3] = 0;
	netdev->dev_addr[4] = 0;
	netdev->dev_addr[5] = hw->my_epid; /* EPID */

	err = register_netdev(netdev);

	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	fjes_hw_exit(&adapter->hw);
	free_netdev(netdev);
/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);
static int fjes_sw_init(struct fjes_adapter *adapter)
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
static void fjes_irq_watch_task(struct work_struct *work)
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
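
/* fjes_watch_unshare_task - Wait for pending unshare requests and tear down the affected buffers */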
static void fjes_watch_unshare_task(struct work_struct *work)
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
					&hw->hw_info.buffer_unshare_reserve_bit);

			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
						&adapter->force_close_task)) {
					adapter->force_reset = true;
						   &adapter->force_close_task);
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
				.com_unregist_buf_exec += 1;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
				  &hw->hw_info.buffer_unshare_reserve_bit);

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);
				ret = fjes_hw_unregister_buff_addr(hw, epidx);
							&adapter->force_close_task)) {
						adapter->force_reset = true;
							   &adapter->force_close_task);
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
					~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
				 void *context, void **return_value)
	struct acpi_device *device;
	bool *found = context;

	result = acpi_bus_get_device(obj_handle, &device);

	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))

	if (!is_extended_socket_device(device))

	if (acpi_check_extended_socket_status(device))

	return AE_CTRL_TERMINATE;
/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
			    acpi_find_extended_socket_device, NULL, &found,

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	result = platform_driver_register(&fjes_driver);

	result = acpi_bus_register_driver(&fjes_acpi_driver);
		goto fail_acpi_driver;

	platform_driver_unregister(&fjes_driver);

module_init(fjes_init_module);
/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);

module_exit(fjes_exit_module);