1 // SPDX-License-Identifier: GPL-2.0-only
3 * FUJITSU Extended Socket Network Device driver
4 * Copyright (c) 2015 FUJITSU LIMITED
7 #include <linux/module.h>
8 #include <linux/types.h>
10 #include <linux/platform_device.h>
11 #include <linux/netdevice.h>
12 #include <linux/interrupt.h>
15 #include "fjes_trace.h"
19 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
20 #define DRV_NAME "fjes"
21 char fjes_driver_name
[] = DRV_NAME
;
22 char fjes_driver_version
[] = DRV_VERSION
;
23 static const char fjes_driver_string
[] =
24 "FUJITSU Extended Socket Network Device Driver";
25 static const char fjes_copyright
[] =
26 "Copyright (c) 2015 FUJITSU LIMITED";
28 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
29 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_VERSION
);
33 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
35 static int fjes_request_irq(struct fjes_adapter
*);
36 static void fjes_free_irq(struct fjes_adapter
*);
38 static int fjes_open(struct net_device
*);
39 static int fjes_close(struct net_device
*);
40 static int fjes_setup_resources(struct fjes_adapter
*);
41 static void fjes_free_resources(struct fjes_adapter
*);
42 static netdev_tx_t
fjes_xmit_frame(struct sk_buff
*, struct net_device
*);
43 static void fjes_raise_intr_rxdata_task(struct work_struct
*);
44 static void fjes_tx_stall_task(struct work_struct
*);
45 static void fjes_force_close_task(struct work_struct
*);
46 static irqreturn_t
fjes_intr(int, void*);
47 static void fjes_get_stats64(struct net_device
*, struct rtnl_link_stats64
*);
48 static int fjes_change_mtu(struct net_device
*, int);
49 static int fjes_vlan_rx_add_vid(struct net_device
*, __be16 proto
, u16
);
50 static int fjes_vlan_rx_kill_vid(struct net_device
*, __be16 proto
, u16
);
51 static void fjes_tx_retry(struct net_device
*, unsigned int txqueue
);
53 static int fjes_acpi_add(struct acpi_device
*);
54 static int fjes_acpi_remove(struct acpi_device
*);
55 static acpi_status
fjes_get_acpi_resource(struct acpi_resource
*, void*);
57 static int fjes_probe(struct platform_device
*);
58 static int fjes_remove(struct platform_device
*);
60 static int fjes_sw_init(struct fjes_adapter
*);
61 static void fjes_netdev_setup(struct net_device
*);
62 static void fjes_irq_watch_task(struct work_struct
*);
63 static void fjes_watch_unshare_task(struct work_struct
*);
64 static void fjes_rx_irq(struct fjes_adapter
*, int);
65 static int fjes_poll(struct napi_struct
*, int);
67 static const struct acpi_device_id fjes_acpi_ids
[] = {
68 {ACPI_MOTHERBOARD_RESOURCE_HID
, 0},
71 MODULE_DEVICE_TABLE(acpi
, fjes_acpi_ids
);
73 static struct acpi_driver fjes_acpi_driver
= {
80 .remove
= fjes_acpi_remove
,
84 static struct platform_driver fjes_driver
= {
89 .remove
= fjes_remove
,
92 static struct resource fjes_resource
[] = {
94 .flags
= IORESOURCE_MEM
,
99 .flags
= IORESOURCE_IRQ
,
105 static bool is_extended_socket_device(struct acpi_device
*device
)
107 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
108 char str_buf
[sizeof(FJES_ACPI_SYMBOL
) + 1];
109 union acpi_object
*str
;
113 status
= acpi_evaluate_object(device
->handle
, "_STR", NULL
, &buffer
);
114 if (ACPI_FAILURE(status
))
117 str
= buffer
.pointer
;
118 result
= utf16s_to_utf8s((wchar_t *)str
->string
.pointer
,
119 str
->string
.length
, UTF16_LITTLE_ENDIAN
,
120 str_buf
, sizeof(str_buf
) - 1);
123 if (strncmp(FJES_ACPI_SYMBOL
, str_buf
, strlen(FJES_ACPI_SYMBOL
)) != 0) {
124 kfree(buffer
.pointer
);
127 kfree(buffer
.pointer
);
132 static int acpi_check_extended_socket_status(struct acpi_device
*device
)
134 unsigned long long sta
;
137 status
= acpi_evaluate_integer(device
->handle
, "_STA", NULL
, &sta
);
138 if (ACPI_FAILURE(status
))
141 if (!((sta
& ACPI_STA_DEVICE_PRESENT
) &&
142 (sta
& ACPI_STA_DEVICE_ENABLED
) &&
143 (sta
& ACPI_STA_DEVICE_UI
) &&
144 (sta
& ACPI_STA_DEVICE_FUNCTIONING
)))
150 static int fjes_acpi_add(struct acpi_device
*device
)
152 struct platform_device
*plat_dev
;
155 if (!is_extended_socket_device(device
))
158 if (acpi_check_extended_socket_status(device
))
161 status
= acpi_walk_resources(device
->handle
, METHOD_NAME__CRS
,
162 fjes_get_acpi_resource
, fjes_resource
);
163 if (ACPI_FAILURE(status
))
166 /* create platform_device */
167 plat_dev
= platform_device_register_simple(DRV_NAME
, 0, fjes_resource
,
168 ARRAY_SIZE(fjes_resource
));
169 if (IS_ERR(plat_dev
))
170 return PTR_ERR(plat_dev
);
172 device
->driver_data
= plat_dev
;
177 static int fjes_acpi_remove(struct acpi_device
*device
)
179 struct platform_device
*plat_dev
;
181 plat_dev
= (struct platform_device
*)acpi_driver_data(device
);
182 platform_device_unregister(plat_dev
);
188 fjes_get_acpi_resource(struct acpi_resource
*acpi_res
, void *data
)
190 struct acpi_resource_address32
*addr
;
191 struct acpi_resource_irq
*irq
;
192 struct resource
*res
= data
;
194 switch (acpi_res
->type
) {
195 case ACPI_RESOURCE_TYPE_ADDRESS32
:
196 addr
= &acpi_res
->data
.address32
;
197 res
[0].start
= addr
->address
.minimum
;
198 res
[0].end
= addr
->address
.minimum
+
199 addr
->address
.address_length
- 1;
202 case ACPI_RESOURCE_TYPE_IRQ
:
203 irq
= &acpi_res
->data
.irq
;
204 if (irq
->interrupt_count
!= 1)
206 res
[1].start
= irq
->interrupts
[0];
207 res
[1].end
= irq
->interrupts
[0];
217 static int fjes_request_irq(struct fjes_adapter
*adapter
)
219 struct net_device
*netdev
= adapter
->netdev
;
222 adapter
->interrupt_watch_enable
= true;
223 if (!delayed_work_pending(&adapter
->interrupt_watch_task
)) {
224 queue_delayed_work(adapter
->control_wq
,
225 &adapter
->interrupt_watch_task
,
226 FJES_IRQ_WATCH_DELAY
);
229 if (!adapter
->irq_registered
) {
230 result
= request_irq(adapter
->hw
.hw_res
.irq
, fjes_intr
,
231 IRQF_SHARED
, netdev
->name
, adapter
);
233 adapter
->irq_registered
= false;
235 adapter
->irq_registered
= true;
241 static void fjes_free_irq(struct fjes_adapter
*adapter
)
243 struct fjes_hw
*hw
= &adapter
->hw
;
245 adapter
->interrupt_watch_enable
= false;
246 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
248 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, true);
250 if (adapter
->irq_registered
) {
251 free_irq(adapter
->hw
.hw_res
.irq
, adapter
);
252 adapter
->irq_registered
= false;
256 static const struct net_device_ops fjes_netdev_ops
= {
257 .ndo_open
= fjes_open
,
258 .ndo_stop
= fjes_close
,
259 .ndo_start_xmit
= fjes_xmit_frame
,
260 .ndo_get_stats64
= fjes_get_stats64
,
261 .ndo_change_mtu
= fjes_change_mtu
,
262 .ndo_tx_timeout
= fjes_tx_retry
,
263 .ndo_vlan_rx_add_vid
= fjes_vlan_rx_add_vid
,
264 .ndo_vlan_rx_kill_vid
= fjes_vlan_rx_kill_vid
,
267 /* fjes_open - Called when a network interface is made active */
268 static int fjes_open(struct net_device
*netdev
)
270 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
271 struct fjes_hw
*hw
= &adapter
->hw
;
274 if (adapter
->open_guard
)
277 result
= fjes_setup_resources(adapter
);
281 hw
->txrx_stop_req_bit
= 0;
282 hw
->epstop_req_bit
= 0;
284 napi_enable(&adapter
->napi
);
286 fjes_hw_capture_interrupt_status(hw
);
288 result
= fjes_request_irq(adapter
);
292 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, false);
294 netif_tx_start_all_queues(netdev
);
295 netif_carrier_on(netdev
);
300 fjes_free_irq(adapter
);
301 napi_disable(&adapter
->napi
);
304 fjes_free_resources(adapter
);
308 /* fjes_close - Disables a network interface */
309 static int fjes_close(struct net_device
*netdev
)
311 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
312 struct fjes_hw
*hw
= &adapter
->hw
;
316 netif_tx_stop_all_queues(netdev
);
317 netif_carrier_off(netdev
);
319 fjes_hw_raise_epstop(hw
);
321 napi_disable(&adapter
->napi
);
323 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
324 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
325 if (epidx
== hw
->my_epid
)
328 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
330 adapter
->hw
.ep_shm_info
[epidx
]
331 .tx
.info
->v1i
.rx_status
&=
334 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
336 fjes_free_irq(adapter
);
338 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
339 cancel_work_sync(&adapter
->unshare_watch_task
);
340 adapter
->unshare_watch_bitmask
= 0;
341 cancel_work_sync(&adapter
->raise_intr_rxdata_task
);
342 cancel_work_sync(&adapter
->tx_stall_task
);
344 cancel_work_sync(&hw
->update_zone_task
);
345 cancel_work_sync(&hw
->epstop_task
);
347 fjes_hw_wait_epstop(hw
);
349 fjes_free_resources(adapter
);
354 static int fjes_setup_resources(struct fjes_adapter
*adapter
)
356 struct net_device
*netdev
= adapter
->netdev
;
357 struct ep_share_mem_info
*buf_pair
;
358 struct fjes_hw
*hw
= &adapter
->hw
;
363 mutex_lock(&hw
->hw_info
.lock
);
364 result
= fjes_hw_request_info(hw
);
367 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
368 hw
->ep_shm_info
[epidx
].es_status
=
369 hw
->hw_info
.res_buf
->info
.info
[epidx
].es_status
;
370 hw
->ep_shm_info
[epidx
].zone
=
371 hw
->hw_info
.res_buf
->info
.info
[epidx
].zone
;
377 adapter
->force_reset
= true;
379 mutex_unlock(&hw
->hw_info
.lock
);
382 mutex_unlock(&hw
->hw_info
.lock
);
384 for (epidx
= 0; epidx
< (hw
->max_epid
); epidx
++) {
385 if ((epidx
!= hw
->my_epid
) &&
386 (hw
->ep_shm_info
[epidx
].es_status
==
387 FJES_ZONING_STATUS_ENABLE
)) {
388 fjes_hw_raise_interrupt(hw
, epidx
,
389 REG_ICTL_MASK_INFO_UPDATE
);
390 hw
->ep_shm_info
[epidx
].ep_stats
391 .send_intr_zoneupdate
+= 1;
395 msleep(FJES_OPEN_ZONE_UPDATE_WAIT
* hw
->max_epid
);
397 for (epidx
= 0; epidx
< (hw
->max_epid
); epidx
++) {
398 if (epidx
== hw
->my_epid
)
401 buf_pair
= &hw
->ep_shm_info
[epidx
];
403 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
404 fjes_hw_setup_epbuf(&buf_pair
->tx
, netdev
->dev_addr
,
406 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
408 if (fjes_hw_epid_is_same_zone(hw
, epidx
)) {
409 mutex_lock(&hw
->hw_info
.lock
);
411 fjes_hw_register_buff_addr(hw
, epidx
, buf_pair
);
412 mutex_unlock(&hw
->hw_info
.lock
);
420 adapter
->force_reset
= true;
424 hw
->ep_shm_info
[epidx
].ep_stats
425 .com_regist_buf_exec
+= 1;
432 static void fjes_free_resources(struct fjes_adapter
*adapter
)
434 struct net_device
*netdev
= adapter
->netdev
;
435 struct fjes_device_command_param param
;
436 struct ep_share_mem_info
*buf_pair
;
437 struct fjes_hw
*hw
= &adapter
->hw
;
438 bool reset_flag
= false;
443 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
444 if (epidx
== hw
->my_epid
)
447 mutex_lock(&hw
->hw_info
.lock
);
448 result
= fjes_hw_unregister_buff_addr(hw
, epidx
);
449 mutex_unlock(&hw
->hw_info
.lock
);
451 hw
->ep_shm_info
[epidx
].ep_stats
.com_unregist_buf_exec
+= 1;
456 buf_pair
= &hw
->ep_shm_info
[epidx
];
458 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
459 fjes_hw_setup_epbuf(&buf_pair
->tx
,
460 netdev
->dev_addr
, netdev
->mtu
);
461 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
463 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
466 if (reset_flag
|| adapter
->force_reset
) {
467 result
= fjes_hw_reset(hw
);
469 adapter
->force_reset
= false;
472 adapter
->open_guard
= true;
474 hw
->hw_info
.buffer_share_bit
= 0;
476 memset((void *)¶m
, 0, sizeof(param
));
478 param
.req_len
= hw
->hw_info
.req_buf_size
;
479 param
.req_start
= __pa(hw
->hw_info
.req_buf
);
480 param
.res_len
= hw
->hw_info
.res_buf_size
;
481 param
.res_start
= __pa(hw
->hw_info
.res_buf
);
482 param
.share_start
= __pa(hw
->hw_info
.share
->ep_status
);
484 fjes_hw_init_command_registers(hw
, ¶m
);
488 static void fjes_tx_stall_task(struct work_struct
*work
)
490 struct fjes_adapter
*adapter
= container_of(work
,
491 struct fjes_adapter
, tx_stall_task
);
492 struct net_device
*netdev
= adapter
->netdev
;
493 struct fjes_hw
*hw
= &adapter
->hw
;
494 int all_queue_available
, sendable
;
495 enum ep_partner_status pstatus
;
496 int max_epid
, my_epid
, epid
;
497 union ep_buffer_info
*info
;
501 dev_trans_start(netdev
)) > FJES_TX_TX_STALL_TIMEOUT
) {
502 netif_wake_queue(netdev
);
506 my_epid
= hw
->my_epid
;
507 max_epid
= hw
->max_epid
;
509 for (i
= 0; i
< 5; i
++) {
510 all_queue_available
= 1;
512 for (epid
= 0; epid
< max_epid
; epid
++) {
516 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
517 sendable
= (pstatus
== EP_PARTNER_SHARED
);
521 info
= adapter
->hw
.ep_shm_info
[epid
].tx
.info
;
523 if (!(info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
))
526 if (EP_RING_FULL(info
->v1i
.head
, info
->v1i
.tail
,
527 info
->v1i
.count_max
)) {
528 all_queue_available
= 0;
533 if (all_queue_available
) {
534 netif_wake_queue(netdev
);
539 usleep_range(50, 100);
541 queue_work(adapter
->txrx_wq
, &adapter
->tx_stall_task
);
544 static void fjes_force_close_task(struct work_struct
*work
)
546 struct fjes_adapter
*adapter
= container_of(work
,
547 struct fjes_adapter
, force_close_task
);
548 struct net_device
*netdev
= adapter
->netdev
;
555 static void fjes_raise_intr_rxdata_task(struct work_struct
*work
)
557 struct fjes_adapter
*adapter
= container_of(work
,
558 struct fjes_adapter
, raise_intr_rxdata_task
);
559 struct fjes_hw
*hw
= &adapter
->hw
;
560 enum ep_partner_status pstatus
;
561 int max_epid
, my_epid
, epid
;
563 my_epid
= hw
->my_epid
;
564 max_epid
= hw
->max_epid
;
566 for (epid
= 0; epid
< max_epid
; epid
++)
567 hw
->ep_shm_info
[epid
].tx_status_work
= 0;
569 for (epid
= 0; epid
< max_epid
; epid
++) {
573 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
574 if (pstatus
== EP_PARTNER_SHARED
) {
575 hw
->ep_shm_info
[epid
].tx_status_work
=
576 hw
->ep_shm_info
[epid
].tx
.info
->v1i
.tx_status
;
578 if (hw
->ep_shm_info
[epid
].tx_status_work
==
579 FJES_TX_DELAY_SEND_PENDING
) {
580 hw
->ep_shm_info
[epid
].tx
.info
->v1i
.tx_status
=
581 FJES_TX_DELAY_SEND_NONE
;
586 for (epid
= 0; epid
< max_epid
; epid
++) {
590 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
591 if ((hw
->ep_shm_info
[epid
].tx_status_work
==
592 FJES_TX_DELAY_SEND_PENDING
) &&
593 (pstatus
== EP_PARTNER_SHARED
) &&
594 !(hw
->ep_shm_info
[epid
].rx
.info
->v1i
.rx_status
&
595 FJES_RX_POLL_WORK
)) {
596 fjes_hw_raise_interrupt(hw
, epid
,
597 REG_ICTL_MASK_RX_DATA
);
598 hw
->ep_shm_info
[epid
].ep_stats
.send_intr_rx
+= 1;
602 usleep_range(500, 1000);
605 static int fjes_tx_send(struct fjes_adapter
*adapter
, int dest
,
606 void *data
, size_t len
)
610 retval
= fjes_hw_epbuf_tx_pkt_send(&adapter
->hw
.ep_shm_info
[dest
].tx
,
615 adapter
->hw
.ep_shm_info
[dest
].tx
.info
->v1i
.tx_status
=
616 FJES_TX_DELAY_SEND_PENDING
;
617 if (!work_pending(&adapter
->raise_intr_rxdata_task
))
618 queue_work(adapter
->txrx_wq
,
619 &adapter
->raise_intr_rxdata_task
);
626 fjes_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
628 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
629 struct fjes_hw
*hw
= &adapter
->hw
;
631 int max_epid
, my_epid
, dest_epid
;
632 enum ep_partner_status pstatus
;
633 struct netdev_queue
*cur_queue
;
634 char shortpkt
[VLAN_ETH_HLEN
];
645 cur_queue
= netdev_get_tx_queue(netdev
, queue_no
);
647 eth
= (struct ethhdr
*)skb
->data
;
648 my_epid
= hw
->my_epid
;
650 vlan
= (vlan_get_tag(skb
, &vlan_id
) == 0) ? true : false;
655 if (is_multicast_ether_addr(eth
->h_dest
)) {
657 max_epid
= hw
->max_epid
;
659 } else if (is_local_ether_addr(eth
->h_dest
)) {
660 dest_epid
= eth
->h_dest
[ETH_ALEN
- 1];
661 max_epid
= dest_epid
+ 1;
663 if ((eth
->h_dest
[0] == 0x02) &&
664 (0x00 == (eth
->h_dest
[1] | eth
->h_dest
[2] |
665 eth
->h_dest
[3] | eth
->h_dest
[4])) &&
666 (dest_epid
< hw
->max_epid
)) {
673 adapter
->stats64
.tx_packets
+= 1;
674 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
675 adapter
->stats64
.tx_bytes
+= len
;
676 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
683 adapter
->stats64
.tx_packets
+= 1;
684 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
685 adapter
->stats64
.tx_bytes
+= len
;
686 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
689 for (; dest_epid
< max_epid
; dest_epid
++) {
690 if (my_epid
== dest_epid
)
693 pstatus
= fjes_hw_get_partner_ep_status(hw
, dest_epid
);
694 if (pstatus
!= EP_PARTNER_SHARED
) {
696 hw
->ep_shm_info
[dest_epid
].ep_stats
697 .tx_dropped_not_shared
+= 1;
699 } else if (!fjes_hw_check_epbuf_version(
700 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
, 0)) {
701 /* version is NOT 0 */
702 adapter
->stats64
.tx_carrier_errors
+= 1;
703 hw
->ep_shm_info
[dest_epid
].net_stats
704 .tx_carrier_errors
+= 1;
705 hw
->ep_shm_info
[dest_epid
].ep_stats
706 .tx_dropped_ver_mismatch
+= 1;
709 } else if (!fjes_hw_check_mtu(
710 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
,
712 adapter
->stats64
.tx_dropped
+= 1;
713 hw
->ep_shm_info
[dest_epid
].net_stats
.tx_dropped
+= 1;
714 adapter
->stats64
.tx_errors
+= 1;
715 hw
->ep_shm_info
[dest_epid
].net_stats
.tx_errors
+= 1;
716 hw
->ep_shm_info
[dest_epid
].ep_stats
717 .tx_dropped_buf_size_mismatch
+= 1;
721 !fjes_hw_check_vlan_id(
722 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
,
724 hw
->ep_shm_info
[dest_epid
].ep_stats
725 .tx_dropped_vlanid_mismatch
+= 1;
728 if (len
< VLAN_ETH_HLEN
) {
729 memset(shortpkt
, 0, VLAN_ETH_HLEN
);
730 memcpy(shortpkt
, skb
->data
, skb
->len
);
735 if (adapter
->tx_retry_count
== 0) {
736 adapter
->tx_start_jiffies
= jiffies
;
737 adapter
->tx_retry_count
= 1;
739 adapter
->tx_retry_count
++;
742 if (fjes_tx_send(adapter
, dest_epid
, data
, len
)) {
747 (long)adapter
->tx_start_jiffies
) >=
748 FJES_TX_RETRY_TIMEOUT
) {
749 adapter
->stats64
.tx_fifo_errors
+= 1;
750 hw
->ep_shm_info
[dest_epid
].net_stats
751 .tx_fifo_errors
+= 1;
752 adapter
->stats64
.tx_errors
+= 1;
753 hw
->ep_shm_info
[dest_epid
].net_stats
758 netif_trans_update(netdev
);
759 hw
->ep_shm_info
[dest_epid
].ep_stats
760 .tx_buffer_full
+= 1;
761 netif_tx_stop_queue(cur_queue
);
763 if (!work_pending(&adapter
->tx_stall_task
))
764 queue_work(adapter
->txrx_wq
,
765 &adapter
->tx_stall_task
);
767 ret
= NETDEV_TX_BUSY
;
771 adapter
->stats64
.tx_packets
+= 1;
772 hw
->ep_shm_info
[dest_epid
].net_stats
774 adapter
->stats64
.tx_bytes
+= len
;
775 hw
->ep_shm_info
[dest_epid
].net_stats
779 adapter
->tx_retry_count
= 0;
785 if (ret
== NETDEV_TX_OK
) {
788 adapter
->stats64
.tx_packets
+= 1;
789 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
790 adapter
->stats64
.tx_bytes
+= 1;
791 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
/* fjes_tx_retry - ndo_tx_timeout handler.
 * Wakes TX queue 0 so transmission is retried; @txqueue is accepted to
 * match the ndo_tx_timeout signature but this driver only uses queue 0.
 */
static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(txq);
}
806 fjes_get_stats64(struct net_device
*netdev
, struct rtnl_link_stats64
*stats
)
808 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
810 memcpy(stats
, &adapter
->stats64
, sizeof(struct rtnl_link_stats64
));
813 static int fjes_change_mtu(struct net_device
*netdev
, int new_mtu
)
815 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
816 bool running
= netif_running(netdev
);
817 struct fjes_hw
*hw
= &adapter
->hw
;
822 for (idx
= 0; fjes_support_mtu
[idx
] != 0; idx
++) {
823 if (new_mtu
<= fjes_support_mtu
[idx
]) {
824 new_mtu
= fjes_support_mtu
[idx
];
825 if (new_mtu
== netdev
->mtu
)
837 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
838 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
839 if (epidx
== hw
->my_epid
)
841 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
&=
842 ~FJES_RX_MTU_CHANGING_DONE
;
844 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
846 netif_tx_stop_all_queues(netdev
);
847 netif_carrier_off(netdev
);
848 cancel_work_sync(&adapter
->tx_stall_task
);
849 napi_disable(&adapter
->napi
);
853 netif_tx_stop_all_queues(netdev
);
856 netdev
->mtu
= new_mtu
;
859 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
860 if (epidx
== hw
->my_epid
)
863 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
864 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
868 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
|=
869 FJES_RX_MTU_CHANGING_DONE
;
870 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
873 netif_tx_wake_all_queues(netdev
);
874 netif_carrier_on(netdev
);
875 napi_enable(&adapter
->napi
);
876 napi_schedule(&adapter
->napi
);
882 static int fjes_vlan_rx_add_vid(struct net_device
*netdev
,
883 __be16 proto
, u16 vid
)
885 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
889 for (epid
= 0; epid
< adapter
->hw
.max_epid
; epid
++) {
890 if (epid
== adapter
->hw
.my_epid
)
893 if (!fjes_hw_check_vlan_id(
894 &adapter
->hw
.ep_shm_info
[epid
].tx
, vid
))
895 ret
= fjes_hw_set_vlan_id(
896 &adapter
->hw
.ep_shm_info
[epid
].tx
, vid
);
899 return ret
? 0 : -ENOSPC
;
902 static int fjes_vlan_rx_kill_vid(struct net_device
*netdev
,
903 __be16 proto
, u16 vid
)
905 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
908 for (epid
= 0; epid
< adapter
->hw
.max_epid
; epid
++) {
909 if (epid
== adapter
->hw
.my_epid
)
912 fjes_hw_del_vlan_id(&adapter
->hw
.ep_shm_info
[epid
].tx
, vid
);
918 static void fjes_txrx_stop_req_irq(struct fjes_adapter
*adapter
,
921 struct fjes_hw
*hw
= &adapter
->hw
;
922 enum ep_partner_status status
;
925 status
= fjes_hw_get_partner_ep_status(hw
, src_epid
);
926 trace_fjes_txrx_stop_req_irq_pre(hw
, src_epid
, status
);
928 case EP_PARTNER_UNSHARE
:
929 case EP_PARTNER_COMPLETE
:
932 case EP_PARTNER_WAITING
:
933 if (src_epid
< hw
->my_epid
) {
934 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
935 hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
|=
936 FJES_RX_STOP_REQ_DONE
;
937 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
939 clear_bit(src_epid
, &hw
->txrx_stop_req_bit
);
940 set_bit(src_epid
, &adapter
->unshare_watch_bitmask
);
942 if (!work_pending(&adapter
->unshare_watch_task
))
943 queue_work(adapter
->control_wq
,
944 &adapter
->unshare_watch_task
);
947 case EP_PARTNER_SHARED
:
948 if (hw
->ep_shm_info
[src_epid
].rx
.info
->v1i
.rx_status
&
949 FJES_RX_STOP_REQ_REQUEST
) {
950 set_bit(src_epid
, &hw
->epstop_req_bit
);
951 if (!work_pending(&hw
->epstop_task
))
952 queue_work(adapter
->control_wq
,
957 trace_fjes_txrx_stop_req_irq_post(hw
, src_epid
);
960 static void fjes_stop_req_irq(struct fjes_adapter
*adapter
, int src_epid
)
962 struct fjes_hw
*hw
= &adapter
->hw
;
963 enum ep_partner_status status
;
966 set_bit(src_epid
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
968 status
= fjes_hw_get_partner_ep_status(hw
, src_epid
);
969 trace_fjes_stop_req_irq_pre(hw
, src_epid
, status
);
971 case EP_PARTNER_WAITING
:
972 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
973 hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
|=
974 FJES_RX_STOP_REQ_DONE
;
975 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
976 clear_bit(src_epid
, &hw
->txrx_stop_req_bit
);
978 case EP_PARTNER_UNSHARE
:
979 case EP_PARTNER_COMPLETE
:
981 set_bit(src_epid
, &adapter
->unshare_watch_bitmask
);
982 if (!work_pending(&adapter
->unshare_watch_task
))
983 queue_work(adapter
->control_wq
,
984 &adapter
->unshare_watch_task
);
986 case EP_PARTNER_SHARED
:
987 set_bit(src_epid
, &hw
->epstop_req_bit
);
989 if (!work_pending(&hw
->epstop_task
))
990 queue_work(adapter
->control_wq
, &hw
->epstop_task
);
993 trace_fjes_stop_req_irq_post(hw
, src_epid
);
996 static void fjes_update_zone_irq(struct fjes_adapter
*adapter
,
999 struct fjes_hw
*hw
= &adapter
->hw
;
1001 if (!work_pending(&hw
->update_zone_task
))
1002 queue_work(adapter
->control_wq
, &hw
->update_zone_task
);
1005 static irqreturn_t
fjes_intr(int irq
, void *data
)
1007 struct fjes_adapter
*adapter
= data
;
1008 struct fjes_hw
*hw
= &adapter
->hw
;
1012 icr
= fjes_hw_capture_interrupt_status(hw
);
1014 if (icr
& REG_IS_MASK_IS_ASSERT
) {
1015 if (icr
& REG_ICTL_MASK_RX_DATA
) {
1016 fjes_rx_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1017 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1021 if (icr
& REG_ICTL_MASK_DEV_STOP_REQ
) {
1022 fjes_stop_req_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1023 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1024 .recv_intr_stop
+= 1;
1027 if (icr
& REG_ICTL_MASK_TXRX_STOP_REQ
) {
1028 fjes_txrx_stop_req_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1029 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1030 .recv_intr_unshare
+= 1;
1033 if (icr
& REG_ICTL_MASK_TXRX_STOP_DONE
)
1034 fjes_hw_set_irqmask(hw
,
1035 REG_ICTL_MASK_TXRX_STOP_DONE
, true);
1037 if (icr
& REG_ICTL_MASK_INFO_UPDATE
) {
1038 fjes_update_zone_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1039 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1040 .recv_intr_zoneupdate
+= 1;
1051 static int fjes_rxframe_search_exist(struct fjes_adapter
*adapter
,
1054 struct fjes_hw
*hw
= &adapter
->hw
;
1055 enum ep_partner_status pstatus
;
1056 int max_epid
, cur_epid
;
1059 max_epid
= hw
->max_epid
;
1060 start_epid
= (start_epid
+ 1 + max_epid
) % max_epid
;
1062 for (i
= 0; i
< max_epid
; i
++) {
1063 cur_epid
= (start_epid
+ i
) % max_epid
;
1064 if (cur_epid
== hw
->my_epid
)
1067 pstatus
= fjes_hw_get_partner_ep_status(hw
, cur_epid
);
1068 if (pstatus
== EP_PARTNER_SHARED
) {
1069 if (!fjes_hw_epbuf_rx_is_empty(
1070 &hw
->ep_shm_info
[cur_epid
].rx
))
1077 static void *fjes_rxframe_get(struct fjes_adapter
*adapter
, size_t *psize
,
1082 *cur_epid
= fjes_rxframe_search_exist(adapter
, *cur_epid
);
1087 fjes_hw_epbuf_rx_curpkt_get_addr(
1088 &adapter
->hw
.ep_shm_info
[*cur_epid
].rx
, psize
);
1093 static void fjes_rxframe_release(struct fjes_adapter
*adapter
, int cur_epid
)
1095 fjes_hw_epbuf_rx_curpkt_drop(&adapter
->hw
.ep_shm_info
[cur_epid
].rx
);
1098 static void fjes_rx_irq(struct fjes_adapter
*adapter
, int src_epid
)
1100 struct fjes_hw
*hw
= &adapter
->hw
;
1102 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_RX_DATA
, true);
1104 adapter
->unset_rx_last
= true;
1105 napi_schedule(&adapter
->napi
);
1108 static int fjes_poll(struct napi_struct
*napi
, int budget
)
1110 struct fjes_adapter
*adapter
=
1111 container_of(napi
, struct fjes_adapter
, napi
);
1112 struct net_device
*netdev
= napi
->dev
;
1113 struct fjes_hw
*hw
= &adapter
->hw
;
1114 struct sk_buff
*skb
;
1121 spin_lock(&hw
->rx_status_lock
);
1122 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1123 if (epidx
== hw
->my_epid
)
1126 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
1128 adapter
->hw
.ep_shm_info
[epidx
]
1129 .tx
.info
->v1i
.rx_status
|= FJES_RX_POLL_WORK
;
1131 spin_unlock(&hw
->rx_status_lock
);
1133 while (work_done
< budget
) {
1134 prefetch(&adapter
->hw
);
1135 frame
= fjes_rxframe_get(adapter
, &frame_len
, &cur_epid
);
1138 skb
= napi_alloc_skb(napi
, frame_len
);
1140 adapter
->stats64
.rx_dropped
+= 1;
1141 hw
->ep_shm_info
[cur_epid
].net_stats
1143 adapter
->stats64
.rx_errors
+= 1;
1144 hw
->ep_shm_info
[cur_epid
].net_stats
1147 skb_put_data(skb
, frame
, frame_len
);
1148 skb
->protocol
= eth_type_trans(skb
, netdev
);
1149 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1151 netif_receive_skb(skb
);
1155 adapter
->stats64
.rx_packets
+= 1;
1156 hw
->ep_shm_info
[cur_epid
].net_stats
1158 adapter
->stats64
.rx_bytes
+= frame_len
;
1159 hw
->ep_shm_info
[cur_epid
].net_stats
1160 .rx_bytes
+= frame_len
;
1162 if (is_multicast_ether_addr(
1163 ((struct ethhdr
*)frame
)->h_dest
)) {
1164 adapter
->stats64
.multicast
+= 1;
1165 hw
->ep_shm_info
[cur_epid
].net_stats
1170 fjes_rxframe_release(adapter
, cur_epid
);
1171 adapter
->unset_rx_last
= true;
1177 if (work_done
< budget
) {
1178 napi_complete_done(napi
, work_done
);
1180 if (adapter
->unset_rx_last
) {
1181 adapter
->rx_last_jiffies
= jiffies
;
1182 adapter
->unset_rx_last
= false;
1185 if (((long)jiffies
- (long)adapter
->rx_last_jiffies
) < 3) {
1186 napi_reschedule(napi
);
1188 spin_lock(&hw
->rx_status_lock
);
1189 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1190 if (epidx
== hw
->my_epid
)
1192 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
1194 adapter
->hw
.ep_shm_info
[epidx
].tx
1195 .info
->v1i
.rx_status
&=
1198 spin_unlock(&hw
->rx_status_lock
);
1200 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_RX_DATA
, false);
1207 /* fjes_probe - Device Initialization Routine */
1208 static int fjes_probe(struct platform_device
*plat_dev
)
1210 struct fjes_adapter
*adapter
;
1211 struct net_device
*netdev
;
1212 struct resource
*res
;
1217 netdev
= alloc_netdev_mq(sizeof(struct fjes_adapter
), "es%d",
1218 NET_NAME_UNKNOWN
, fjes_netdev_setup
,
1224 SET_NETDEV_DEV(netdev
, &plat_dev
->dev
);
1226 dev_set_drvdata(&plat_dev
->dev
, netdev
);
1227 adapter
= netdev_priv(netdev
);
1228 adapter
->netdev
= netdev
;
1229 adapter
->plat_dev
= plat_dev
;
1233 /* setup the private structure */
1234 err
= fjes_sw_init(adapter
);
1236 goto err_free_netdev
;
1238 INIT_WORK(&adapter
->force_close_task
, fjes_force_close_task
);
1239 adapter
->force_reset
= false;
1240 adapter
->open_guard
= false;
1242 adapter
->txrx_wq
= alloc_workqueue(DRV_NAME
"/txrx", WQ_MEM_RECLAIM
, 0);
1243 if (unlikely(!adapter
->txrx_wq
)) {
1245 goto err_free_netdev
;
1248 adapter
->control_wq
= alloc_workqueue(DRV_NAME
"/control",
1250 if (unlikely(!adapter
->control_wq
)) {
1252 goto err_free_txrx_wq
;
1255 INIT_WORK(&adapter
->tx_stall_task
, fjes_tx_stall_task
);
1256 INIT_WORK(&adapter
->raise_intr_rxdata_task
,
1257 fjes_raise_intr_rxdata_task
);
1258 INIT_WORK(&adapter
->unshare_watch_task
, fjes_watch_unshare_task
);
1259 adapter
->unshare_watch_bitmask
= 0;
1261 INIT_DELAYED_WORK(&adapter
->interrupt_watch_task
, fjes_irq_watch_task
);
1262 adapter
->interrupt_watch_enable
= false;
1264 res
= platform_get_resource(plat_dev
, IORESOURCE_MEM
, 0);
1265 hw
->hw_res
.start
= res
->start
;
1266 hw
->hw_res
.size
= resource_size(res
);
1267 hw
->hw_res
.irq
= platform_get_irq(plat_dev
, 0);
1268 err
= fjes_hw_init(&adapter
->hw
);
1270 goto err_free_control_wq
;
1272 /* setup MAC address (02:00:00:00:00:[epid])*/
1273 netdev
->dev_addr
[0] = 2;
1274 netdev
->dev_addr
[1] = 0;
1275 netdev
->dev_addr
[2] = 0;
1276 netdev
->dev_addr
[3] = 0;
1277 netdev
->dev_addr
[4] = 0;
1278 netdev
->dev_addr
[5] = hw
->my_epid
; /* EPID */
1280 err
= register_netdev(netdev
);
1284 netif_carrier_off(netdev
);
1286 fjes_dbg_adapter_init(adapter
);
1291 fjes_hw_exit(&adapter
->hw
);
1292 err_free_control_wq
:
1293 destroy_workqueue(adapter
->control_wq
);
1295 destroy_workqueue(adapter
->txrx_wq
);
1297 free_netdev(netdev
);
1302 /* fjes_remove - Device Removal Routine */
1303 static int fjes_remove(struct platform_device
*plat_dev
)
1305 struct net_device
*netdev
= dev_get_drvdata(&plat_dev
->dev
);
1306 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
1307 struct fjes_hw
*hw
= &adapter
->hw
;
1309 fjes_dbg_adapter_exit(adapter
);
1311 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
1312 cancel_work_sync(&adapter
->unshare_watch_task
);
1313 cancel_work_sync(&adapter
->raise_intr_rxdata_task
);
1314 cancel_work_sync(&adapter
->tx_stall_task
);
1315 if (adapter
->control_wq
)
1316 destroy_workqueue(adapter
->control_wq
);
1317 if (adapter
->txrx_wq
)
1318 destroy_workqueue(adapter
->txrx_wq
);
1320 unregister_netdev(netdev
);
1324 netif_napi_del(&adapter
->napi
);
1326 free_netdev(netdev
);
1331 static int fjes_sw_init(struct fjes_adapter
*adapter
)
1333 struct net_device
*netdev
= adapter
->netdev
;
1335 netif_napi_add(netdev
, &adapter
->napi
, fjes_poll
, 64);
1340 /* fjes_netdev_setup - netdevice initialization routine */
1341 static void fjes_netdev_setup(struct net_device
*netdev
)
1343 ether_setup(netdev
);
1345 netdev
->watchdog_timeo
= FJES_TX_RETRY_INTERVAL
;
1346 netdev
->netdev_ops
= &fjes_netdev_ops
;
1347 fjes_set_ethtool_ops(netdev
);
1348 netdev
->mtu
= fjes_support_mtu
[3];
1349 netdev
->min_mtu
= fjes_support_mtu
[0];
1350 netdev
->max_mtu
= fjes_support_mtu
[3];
1351 netdev
->features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
1354 static void fjes_irq_watch_task(struct work_struct
*work
)
1356 struct fjes_adapter
*adapter
= container_of(to_delayed_work(work
),
1357 struct fjes_adapter
, interrupt_watch_task
);
1359 local_irq_disable();
1360 fjes_intr(adapter
->hw
.hw_res
.irq
, adapter
);
1363 if (fjes_rxframe_search_exist(adapter
, 0) >= 0)
1364 napi_schedule(&adapter
->napi
);
1366 if (adapter
->interrupt_watch_enable
) {
1367 if (!delayed_work_pending(&adapter
->interrupt_watch_task
))
1368 queue_delayed_work(adapter
->control_wq
,
1369 &adapter
->interrupt_watch_task
,
1370 FJES_IRQ_WATCH_DELAY
);
1374 static void fjes_watch_unshare_task(struct work_struct
*work
)
1376 struct fjes_adapter
*adapter
=
1377 container_of(work
, struct fjes_adapter
, unshare_watch_task
);
1379 struct net_device
*netdev
= adapter
->netdev
;
1380 struct fjes_hw
*hw
= &adapter
->hw
;
1382 int unshare_watch
, unshare_reserve
;
1383 int max_epid
, my_epid
, epidx
;
1384 int stop_req
, stop_req_done
;
1385 ulong unshare_watch_bitmask
;
1386 unsigned long flags
;
1391 my_epid
= hw
->my_epid
;
1392 max_epid
= hw
->max_epid
;
1394 unshare_watch_bitmask
= adapter
->unshare_watch_bitmask
;
1395 adapter
->unshare_watch_bitmask
= 0;
1397 while ((unshare_watch_bitmask
|| hw
->txrx_stop_req_bit
) &&
1398 (wait_time
< 3000)) {
1399 for (epidx
= 0; epidx
< max_epid
; epidx
++) {
1400 if (epidx
== my_epid
)
1403 is_shared
= fjes_hw_epid_is_shared(hw
->hw_info
.share
,
1406 stop_req
= test_bit(epidx
, &hw
->txrx_stop_req_bit
);
1408 stop_req_done
= hw
->ep_shm_info
[epidx
].rx
.info
->v1i
.rx_status
&
1409 FJES_RX_STOP_REQ_DONE
;
1411 unshare_watch
= test_bit(epidx
, &unshare_watch_bitmask
);
1413 unshare_reserve
= test_bit(epidx
,
1414 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1417 (is_shared
&& (!is_shared
|| !stop_req_done
))) &&
1418 (is_shared
|| !unshare_watch
|| !unshare_reserve
))
1421 mutex_lock(&hw
->hw_info
.lock
);
1422 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1430 &adapter
->force_close_task
)) {
1431 adapter
->force_reset
= true;
1433 &adapter
->force_close_task
);
1437 mutex_unlock(&hw
->hw_info
.lock
);
1438 hw
->ep_shm_info
[epidx
].ep_stats
1439 .com_unregist_buf_exec
+= 1;
1441 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1442 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
1443 netdev
->dev_addr
, netdev
->mtu
);
1444 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1446 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
1447 clear_bit(epidx
, &unshare_watch_bitmask
);
1449 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1456 if (hw
->hw_info
.buffer_unshare_reserve_bit
) {
1457 for (epidx
= 0; epidx
< max_epid
; epidx
++) {
1458 if (epidx
== my_epid
)
1462 &hw
->hw_info
.buffer_unshare_reserve_bit
)) {
1463 mutex_lock(&hw
->hw_info
.lock
);
1465 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1473 &adapter
->force_close_task
)) {
1474 adapter
->force_reset
= true;
1476 &adapter
->force_close_task
);
1480 mutex_unlock(&hw
->hw_info
.lock
);
1482 hw
->ep_shm_info
[epidx
].ep_stats
1483 .com_unregist_buf_exec
+= 1;
1485 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1486 fjes_hw_setup_epbuf(
1487 &hw
->ep_shm_info
[epidx
].tx
,
1488 netdev
->dev_addr
, netdev
->mtu
);
1489 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1492 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
1493 clear_bit(epidx
, &unshare_watch_bitmask
);
1494 clear_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
1497 if (test_bit(epidx
, &unshare_watch_bitmask
)) {
1498 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1499 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
&=
1500 ~FJES_RX_STOP_REQ_DONE
;
1501 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1509 acpi_find_extended_socket_device(acpi_handle obj_handle
, u32 level
,
1510 void *context
, void **return_value
)
1512 struct acpi_device
*device
;
1513 bool *found
= context
;
1516 result
= acpi_bus_get_device(obj_handle
, &device
);
1520 if (strcmp(acpi_device_hid(device
), ACPI_MOTHERBOARD_RESOURCE_HID
))
1523 if (!is_extended_socket_device(device
))
1526 if (acpi_check_extended_socket_status(device
))
1530 return AE_CTRL_TERMINATE
;
1533 /* fjes_init_module - Driver Registration Routine */
1534 static int __init
fjes_init_module(void)
1539 acpi_walk_namespace(ACPI_TYPE_DEVICE
, ACPI_ROOT_OBJECT
, ACPI_UINT32_MAX
,
1540 acpi_find_extended_socket_device
, NULL
, &found
,
1546 pr_info("%s - version %s - %s\n",
1547 fjes_driver_string
, fjes_driver_version
, fjes_copyright
);
1551 result
= platform_driver_register(&fjes_driver
);
1557 result
= acpi_bus_register_driver(&fjes_acpi_driver
);
1559 goto fail_acpi_driver
;
1564 platform_driver_unregister(&fjes_driver
);
1569 module_init(fjes_init_module
);
1571 /* fjes_exit_module - Driver Exit Cleanup Routine */
1572 static void __exit
fjes_exit_module(void)
1574 acpi_bus_unregister_driver(&fjes_acpi_driver
);
1575 platform_driver_unregister(&fjes_driver
);
1579 module_exit(fjes_exit_module
);