/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#include "fjes.h"
#include "fjes_trace.h"

#define DRV_VERSION	__stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME	"fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
		"Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"

static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);

static int fjes_open(struct net_device *);
static int fjes_close(struct net_device *);
static int fjes_setup_resources(struct fjes_adapter *);
static void fjes_free_resources(struct fjes_adapter *);
static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void *);
static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
static void fjes_tx_retry(struct net_device *);

static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void *);

static int fjes_probe(struct platform_device *);
static int fjes_remove(struct platform_device *);

static int fjes_sw_init(struct fjes_adapter *);
static void fjes_netdev_setup(struct net_device *);
static void fjes_irq_watch_task(struct work_struct *);
static void fjes_watch_unshare_task(struct work_struct *);
static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);

static const struct acpi_device_id fjes_acpi_ids[] = {
	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

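/*
 * The extended socket device is exposed by firmware as a PNP0C02
 * (motherboard resource) ACPI device.  fjes_acpi_driver binds to that
 * HID, checks _STR/_STA via the helpers below, and registers a platform
 * device carrying the MMIO and IRQ resources; fjes_driver then probes
 * that platform device.
 */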
static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};

static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};

static struct resource fjes_resource[] = {
	{
		.flags = IORESOURCE_MEM,
	},
	{
		.flags = IORESOURCE_IRQ,
	},
};

static bool is_extended_socket_device(struct acpi_device *device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	union acpi_object *str;
	acpi_status status;
	int result;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return false;

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);
	str_buf[result] = 0;

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);
		return false;
	}
	kfree(buffer.pointer);

	return true;
}

static int acpi_check_extended_socket_status(struct acpi_device *device)
{
	unsigned long long sta;
	acpi_status status;

	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
	      (sta & ACPI_STA_DEVICE_ENABLED) &&
	      (sta & ACPI_STA_DEVICE_UI) &&
	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
		return -ENODEV;

	return 0;
}

static int fjes_acpi_add(struct acpi_device *device)
{
	struct platform_device *plat_dev;
	acpi_status status;

	if (!is_extended_socket_device(device))
		return -ENODEV;

	if (acpi_check_extended_socket_status(device))
		return -ENODEV;

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	if (IS_ERR(plat_dev))
		return PTR_ERR(plat_dev);

	device->driver_data = plat_dev;

	return 0;
}

static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}

static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			addr->address.address_length - 1;
		break;

	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
			return AE_ERROR;
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
		break;

	default:
		break;
	}

	return AE_OK;
}

static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}

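/*
 * fjes_free_irq() undoes fjes_request_irq(): it stops the interrupt
 * watch work, masks all device interrupts and releases the IRQ if one
 * was registered.
 */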
static void fjes_free_irq(struct fjes_adapter *adapter)
{
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
	}
}

static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};

/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}

/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}

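/*
 * fjes_setup_resources() queries partner endpoint information from the
 * hardware, notifies the other endpoints of a zone update, and registers
 * the shared TX/RX buffer pair for every partner in the same zone.
 */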
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}

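/*
 * fjes_free_resources() unregisters the shared buffers for all partner
 * endpoints and, if any unregister failed or a reset was forced,
 * resets the hardware and re-initializes its command registers.
 */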
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}

static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	if (((long)jiffies -
	     dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}

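/*
 * fjes_force_close_task() closes the interface under RTNL when an
 * unrecoverable buffer (un)register error has been detected.
 */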
static void fjes_force_close_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
			    FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	usleep_range(500, 1000);
}

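/*
 * TX notification is coalesced: fjes_tx_send() below only marks the
 * shared buffer as FJES_TX_DELAY_SEND_PENDING, and the
 * raise_intr_rxdata_task above later raises a single RX-data interrupt
 * per partner endpoint that still has pending data.
 */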
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int retval;

	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					   data, len);
	if (retval)
		return retval;

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	return 0;
}

static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			if (!is_multi)
				hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_not_shared += 1;
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
						.tx_dropped_ver_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_buf_size_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_vlanid_mismatch += 1;
			ret = NETDEV_TX_OK;
		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					hw->ep_shm_info[dest_epid].ep_stats
							.tx_buffer_full += 1;
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
					break;
				}
			} else {
				adapter->stats64.tx_packets += 1;
				hw->ep_shm_info[dest_epid].net_stats
							.tx_packets += 1;
				adapter->stats64.tx_bytes += len;
				hw->ep_shm_info[dest_epid].net_stats
							.tx_bytes += len;

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}

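/*
 * .ndo_tx_timeout handler: recovery is simply waking queue 0 so that
 * fjes_xmit_frame() gets another chance to place the frame in the
 * shared buffer.
 */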
static void fjes_tx_retry(struct net_device *netdev)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}

static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
}

static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}

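/*
 * VLAN filters are propagated to the per-endpoint shared TX buffers;
 * -ENOSPC is returned when no free VLAN ID slot is left.
 */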
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ret = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ret ? 0 : -ENOSPC;
}

static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}

static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}

static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
			FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		/* fall through */
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}

static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
}

static irqreturn_t
fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA) {
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_rx += 1;
		}

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_stop += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_unshare += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_zoneupdate += 1;
		}

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}

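/*
 * RX frames are pulled directly from the partners' shared buffers.
 * fjes_rxframe_search_exist() scans the endpoints round-robin, starting
 * after the endpoint that was serviced last, and returns the first one
 * with a non-empty RX ring (or -1 if none).
 */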
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
				     int start_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;
	int i;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
				return cur_epid;
		}
	}

	return -1;
}

static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	void *frame;

	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	frame =
	fjes_hw_epbuf_rx_curpkt_get_addr(
		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);

	return frame;
}

static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}

static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}

static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
						.multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}

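/*
 * Note on fjes_poll() above: after the RX rings run empty the handler
 * keeps rescheduling itself for roughly three jiffies before clearing
 * FJES_RX_POLL_WORK and unmasking the RX-data interrupt, presumably to
 * catch closely spaced frames without another interrupt round-trip.
 */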
/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);
	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	netdev->dev_addr[0] = 2;
	netdev->dev_addr[1] = 0;
	netdev->dev_addr[2] = 0;
	netdev->dev_addr[3] = 0;
	netdev->dev_addr[4] = 0;
	netdev->dev_addr[5] = hw->my_epid; /* EPID */

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}

/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}

static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);

	return 0;
}

/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}

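/*
 * fjes_irq_watch_task() is a polling fallback: while enabled it
 * periodically (FJES_IRQ_WATCH_DELAY) invokes the interrupt handler
 * with interrupts disabled and schedules NAPI if any partner RX ring
 * has pending frames, so traffic still flows if an interrupt is missed.
 */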
static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}

static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			if (ret) {
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
			}
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				if (ret) {
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
				}
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
					~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}

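/*
 * Namespace walk callback used at module init: the module only loads
 * when an enabled extended socket device (PNP0C02 with the expected
 * _STR) is present in the ACPI namespace.
 */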
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
				 void *context, void **return_value)
{
	struct acpi_device *device;
	bool *found = context;
	int result;

	result = acpi_bus_get_device(obj_handle, &device);
	if (result)
		return AE_OK;

	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
		return AE_OK;

	if (!is_extended_socket_device(device))
		return AE_OK;

	if (acpi_check_extended_socket_status(device))
		return AE_OK;

	*found = true;
	return AE_CTRL_TERMINATE;
}

/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
	bool found = false;
	int result;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
			    acpi_find_extended_socket_device, NULL, &found,
			    NULL);

	if (!found)
		return -ENODEV;

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	fjes_dbg_init();

	result = platform_driver_register(&fjes_driver);
	if (result < 0) {
		fjes_dbg_exit();
		return result;
	}

	result = acpi_bus_register_driver(&fjes_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	return result;

fail_acpi_driver:
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
	return result;
}

module_init(fjes_init_module);

/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}

module_exit(fjes_exit_module);