/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */
/* Deferred-work handlers, defined near the bottom of this file. */
static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);
28 /* supported MTU list */
29 const u32 fjes_support_mtu
[] = {
30 FJES_MTU_DEFINE(8 * 1024),
31 FJES_MTU_DEFINE(16 * 1024),
32 FJES_MTU_DEFINE(32 * 1024),
33 FJES_MTU_DEFINE(64 * 1024),
37 u32
fjes_hw_rd32(struct fjes_hw
*hw
, u32 reg
)
42 value
= readl(&base
[reg
]);
47 static u8
*fjes_hw_iomap(struct fjes_hw
*hw
)
51 if (!request_mem_region(hw
->hw_res
.start
, hw
->hw_res
.size
,
53 pr_err("request_mem_region failed\n");
57 base
= (u8
*)ioremap_nocache(hw
->hw_res
.start
, hw
->hw_res
.size
);
62 static void fjes_hw_iounmap(struct fjes_hw
*hw
)
65 release_mem_region(hw
->hw_res
.start
, hw
->hw_res
.size
);
68 int fjes_hw_reset(struct fjes_hw
*hw
)
75 wr32(XSCT_DCTL
, dctl
.reg
);
77 timeout
= FJES_DEVICE_RESET_TIMEOUT
* 1000;
78 dctl
.reg
= rd32(XSCT_DCTL
);
79 while ((dctl
.bits
.reset
== 1) && (timeout
> 0)) {
81 dctl
.reg
= rd32(XSCT_DCTL
);
85 return timeout
> 0 ? 0 : -EIO
;
88 static int fjes_hw_get_max_epid(struct fjes_hw
*hw
)
90 union REG_MAX_EP info
;
92 info
.reg
= rd32(XSCT_MAX_EP
);
94 return info
.bits
.maxep
;
97 static int fjes_hw_get_my_epid(struct fjes_hw
*hw
)
99 union REG_OWNER_EPID info
;
101 info
.reg
= rd32(XSCT_OWNER_EPID
);
103 return info
.bits
.epid
;
106 static int fjes_hw_alloc_shared_status_region(struct fjes_hw
*hw
)
110 size
= sizeof(struct fjes_device_shared_info
) +
111 (sizeof(u8
) * hw
->max_epid
);
112 hw
->hw_info
.share
= kzalloc(size
, GFP_KERNEL
);
113 if (!hw
->hw_info
.share
)
116 hw
->hw_info
.share
->epnum
= hw
->max_epid
;
121 static void fjes_hw_free_shared_status_region(struct fjes_hw
*hw
)
123 kfree(hw
->hw_info
.share
);
124 hw
->hw_info
.share
= NULL
;
127 static int fjes_hw_alloc_epbuf(struct epbuf_handler
*epbh
)
131 mem
= vzalloc(EP_BUFFER_SIZE
);
136 epbh
->size
= EP_BUFFER_SIZE
;
138 epbh
->info
= (union ep_buffer_info
*)mem
;
139 epbh
->ring
= (u8
*)(mem
+ sizeof(union ep_buffer_info
));
144 static void fjes_hw_free_epbuf(struct epbuf_handler
*epbh
)
154 void fjes_hw_setup_epbuf(struct epbuf_handler
*epbh
, u8
*mac_addr
, u32 mtu
)
156 union ep_buffer_info
*info
= epbh
->info
;
157 u16 vlan_id
[EP_BUFFER_SUPPORT_VLAN_MAX
];
160 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++)
161 vlan_id
[i
] = info
->v1i
.vlan_id
[i
];
163 memset(info
, 0, sizeof(union ep_buffer_info
));
165 info
->v1i
.version
= 0; /* version 0 */
167 for (i
= 0; i
< ETH_ALEN
; i
++)
168 info
->v1i
.mac_addr
[i
] = mac_addr
[i
];
173 info
->v1i
.info_size
= sizeof(union ep_buffer_info
);
174 info
->v1i
.buffer_size
= epbh
->size
- info
->v1i
.info_size
;
176 info
->v1i
.frame_max
= FJES_MTU_TO_FRAME_SIZE(mtu
);
177 info
->v1i
.count_max
=
178 EP_RING_NUM(info
->v1i
.buffer_size
, info
->v1i
.frame_max
);
180 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++)
181 info
->v1i
.vlan_id
[i
] = vlan_id
[i
];
183 info
->v1i
.rx_status
|= FJES_RX_MTU_CHANGING_DONE
;
187 fjes_hw_init_command_registers(struct fjes_hw
*hw
,
188 struct fjes_device_command_param
*param
)
190 /* Request Buffer length */
191 wr32(XSCT_REQBL
, (__le32
)(param
->req_len
));
192 /* Response Buffer Length */
193 wr32(XSCT_RESPBL
, (__le32
)(param
->res_len
));
195 /* Request Buffer Address */
197 (__le32
)(param
->req_start
& GENMASK_ULL(31, 0)));
199 (__le32
)((param
->req_start
& GENMASK_ULL(63, 32)) >> 32));
201 /* Response Buffer Address */
203 (__le32
)(param
->res_start
& GENMASK_ULL(31, 0)));
205 (__le32
)((param
->res_start
& GENMASK_ULL(63, 32)) >> 32));
207 /* Share status address */
209 (__le32
)(param
->share_start
& GENMASK_ULL(31, 0)));
211 (__le32
)((param
->share_start
& GENMASK_ULL(63, 32)) >> 32));
214 static int fjes_hw_setup(struct fjes_hw
*hw
)
216 u8 mac
[ETH_ALEN
] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
217 struct fjes_device_command_param param
;
218 struct ep_share_mem_info
*buf_pair
;
225 hw
->hw_info
.max_epid
= &hw
->max_epid
;
226 hw
->hw_info
.my_epid
= &hw
->my_epid
;
228 buf
= kcalloc(hw
->max_epid
, sizeof(struct ep_share_mem_info
),
233 hw
->ep_shm_info
= (struct ep_share_mem_info
*)buf
;
235 mem_size
= FJES_DEV_REQ_BUF_SIZE(hw
->max_epid
);
236 hw
->hw_info
.req_buf
= kzalloc(mem_size
, GFP_KERNEL
);
237 if (!(hw
->hw_info
.req_buf
))
240 hw
->hw_info
.req_buf_size
= mem_size
;
242 mem_size
= FJES_DEV_RES_BUF_SIZE(hw
->max_epid
);
243 hw
->hw_info
.res_buf
= kzalloc(mem_size
, GFP_KERNEL
);
244 if (!(hw
->hw_info
.res_buf
))
247 hw
->hw_info
.res_buf_size
= mem_size
;
249 result
= fjes_hw_alloc_shared_status_region(hw
);
253 hw
->hw_info
.buffer_share_bit
= 0;
254 hw
->hw_info
.buffer_unshare_reserve_bit
= 0;
256 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
257 if (epidx
!= hw
->my_epid
) {
258 buf_pair
= &hw
->ep_shm_info
[epidx
];
260 result
= fjes_hw_alloc_epbuf(&buf_pair
->tx
);
264 result
= fjes_hw_alloc_epbuf(&buf_pair
->rx
);
268 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
269 fjes_hw_setup_epbuf(&buf_pair
->tx
, mac
,
270 fjes_support_mtu
[0]);
271 fjes_hw_setup_epbuf(&buf_pair
->rx
, mac
,
272 fjes_support_mtu
[0]);
273 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
277 memset(¶m
, 0, sizeof(param
));
279 param
.req_len
= hw
->hw_info
.req_buf_size
;
280 param
.req_start
= __pa(hw
->hw_info
.req_buf
);
281 param
.res_len
= hw
->hw_info
.res_buf_size
;
282 param
.res_start
= __pa(hw
->hw_info
.res_buf
);
284 param
.share_start
= __pa(hw
->hw_info
.share
->ep_status
);
286 fjes_hw_init_command_registers(hw
, ¶m
);
291 static void fjes_hw_cleanup(struct fjes_hw
*hw
)
295 if (!hw
->ep_shm_info
)
298 fjes_hw_free_shared_status_region(hw
);
300 kfree(hw
->hw_info
.req_buf
);
301 hw
->hw_info
.req_buf
= NULL
;
303 kfree(hw
->hw_info
.res_buf
);
304 hw
->hw_info
.res_buf
= NULL
;
306 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
307 if (epidx
== hw
->my_epid
)
309 fjes_hw_free_epbuf(&hw
->ep_shm_info
[epidx
].tx
);
310 fjes_hw_free_epbuf(&hw
->ep_shm_info
[epidx
].rx
);
313 kfree(hw
->ep_shm_info
);
314 hw
->ep_shm_info
= NULL
;
317 int fjes_hw_init(struct fjes_hw
*hw
)
321 hw
->base
= fjes_hw_iomap(hw
);
325 ret
= fjes_hw_reset(hw
);
329 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, true);
331 INIT_WORK(&hw
->update_zone_task
, fjes_hw_update_zone_task
);
332 INIT_WORK(&hw
->epstop_task
, fjes_hw_epstop_task
);
334 mutex_init(&hw
->hw_info
.lock
);
335 spin_lock_init(&hw
->rx_status_lock
);
337 hw
->max_epid
= fjes_hw_get_max_epid(hw
);
338 hw
->my_epid
= fjes_hw_get_my_epid(hw
);
340 if ((hw
->max_epid
== 0) || (hw
->my_epid
>= hw
->max_epid
))
343 ret
= fjes_hw_setup(hw
);
348 void fjes_hw_exit(struct fjes_hw
*hw
)
353 ret
= fjes_hw_reset(hw
);
355 pr_err("%s: reset error", __func__
);
363 cancel_work_sync(&hw
->update_zone_task
);
364 cancel_work_sync(&hw
->epstop_task
);
367 static enum fjes_dev_command_response_e
368 fjes_hw_issue_request_command(struct fjes_hw
*hw
,
369 enum fjes_dev_command_request_type type
)
371 enum fjes_dev_command_response_e ret
= FJES_CMD_STATUS_UNKNOWN
;
377 cr
.bits
.req_start
= 1;
378 cr
.bits
.req_code
= type
;
379 wr32(XSCT_CR
, cr
.reg
);
380 cr
.reg
= rd32(XSCT_CR
);
382 if (cr
.bits
.error
== 0) {
383 timeout
= FJES_COMMAND_REQ_TIMEOUT
* 1000;
384 cs
.reg
= rd32(XSCT_CS
);
386 while ((cs
.bits
.complete
!= 1) && timeout
> 0) {
388 cs
.reg
= rd32(XSCT_CS
);
392 if (cs
.bits
.complete
== 1)
393 ret
= FJES_CMD_STATUS_NORMAL
;
394 else if (timeout
<= 0)
395 ret
= FJES_CMD_STATUS_TIMEOUT
;
398 switch (cr
.bits
.err_info
) {
399 case FJES_CMD_REQ_ERR_INFO_PARAM
:
400 ret
= FJES_CMD_STATUS_ERROR_PARAM
;
402 case FJES_CMD_REQ_ERR_INFO_STATUS
:
403 ret
= FJES_CMD_STATUS_ERROR_STATUS
;
406 ret
= FJES_CMD_STATUS_UNKNOWN
;
414 int fjes_hw_request_info(struct fjes_hw
*hw
)
416 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
417 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
418 enum fjes_dev_command_response_e ret
;
421 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
422 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
424 req_buf
->info
.length
= FJES_DEV_COMMAND_INFO_REQ_LEN
;
426 res_buf
->info
.length
= 0;
427 res_buf
->info
.code
= 0;
429 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_INFO
);
433 if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw
->hw_info
.max_epid
)) !=
434 res_buf
->info
.length
) {
436 } else if (ret
== FJES_CMD_STATUS_NORMAL
) {
437 switch (res_buf
->info
.code
) {
438 case FJES_CMD_REQ_RES_CODE_NORMAL
:
447 case FJES_CMD_STATUS_UNKNOWN
:
450 case FJES_CMD_STATUS_TIMEOUT
:
453 case FJES_CMD_STATUS_ERROR_PARAM
:
456 case FJES_CMD_STATUS_ERROR_STATUS
:
468 int fjes_hw_register_buff_addr(struct fjes_hw
*hw
, int dest_epid
,
469 struct ep_share_mem_info
*buf_pair
)
471 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
472 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
473 enum fjes_dev_command_response_e ret
;
480 if (test_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
))
483 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
484 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
486 req_buf
->share_buffer
.length
= FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
489 req_buf
->share_buffer
.epid
= dest_epid
;
492 req_buf
->share_buffer
.buffer
[idx
++] = buf_pair
->tx
.size
;
493 page_count
= buf_pair
->tx
.size
/ EP_BUFFER_INFO_SIZE
;
494 for (i
= 0; i
< page_count
; i
++) {
495 addr
= ((u8
*)(buf_pair
->tx
.buffer
)) +
496 (i
* EP_BUFFER_INFO_SIZE
);
497 req_buf
->share_buffer
.buffer
[idx
++] =
498 (__le64
)(page_to_phys(vmalloc_to_page(addr
)) +
499 offset_in_page(addr
));
502 req_buf
->share_buffer
.buffer
[idx
++] = buf_pair
->rx
.size
;
503 page_count
= buf_pair
->rx
.size
/ EP_BUFFER_INFO_SIZE
;
504 for (i
= 0; i
< page_count
; i
++) {
505 addr
= ((u8
*)(buf_pair
->rx
.buffer
)) +
506 (i
* EP_BUFFER_INFO_SIZE
);
507 req_buf
->share_buffer
.buffer
[idx
++] =
508 (__le64
)(page_to_phys(vmalloc_to_page(addr
)) +
509 offset_in_page(addr
));
512 res_buf
->share_buffer
.length
= 0;
513 res_buf
->share_buffer
.code
= 0;
515 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_SHARE_BUFFER
);
517 timeout
= FJES_COMMAND_REQ_BUFF_TIMEOUT
* 1000;
518 while ((ret
== FJES_CMD_STATUS_NORMAL
) &&
519 (res_buf
->share_buffer
.length
==
520 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN
) &&
521 (res_buf
->share_buffer
.code
== FJES_CMD_REQ_RES_CODE_BUSY
) &&
523 msleep(200 + hw
->my_epid
* 20);
524 timeout
-= (200 + hw
->my_epid
* 20);
526 res_buf
->share_buffer
.length
= 0;
527 res_buf
->share_buffer
.code
= 0;
529 ret
= fjes_hw_issue_request_command(
530 hw
, FJES_CMD_REQ_SHARE_BUFFER
);
535 if (res_buf
->share_buffer
.length
!=
536 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN
)
538 else if (ret
== FJES_CMD_STATUS_NORMAL
) {
539 switch (res_buf
->share_buffer
.code
) {
540 case FJES_CMD_REQ_RES_CODE_NORMAL
:
542 set_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
);
544 case FJES_CMD_REQ_RES_CODE_BUSY
:
553 case FJES_CMD_STATUS_UNKNOWN
:
556 case FJES_CMD_STATUS_TIMEOUT
:
559 case FJES_CMD_STATUS_ERROR_PARAM
:
560 case FJES_CMD_STATUS_ERROR_STATUS
:
570 int fjes_hw_unregister_buff_addr(struct fjes_hw
*hw
, int dest_epid
)
572 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
573 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
574 struct fjes_device_shared_info
*share
= hw
->hw_info
.share
;
575 enum fjes_dev_command_response_e ret
;
582 if (!req_buf
|| !res_buf
|| !share
)
585 if (!test_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
))
588 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
589 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
591 req_buf
->unshare_buffer
.length
=
592 FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN
;
593 req_buf
->unshare_buffer
.epid
= dest_epid
;
595 res_buf
->unshare_buffer
.length
= 0;
596 res_buf
->unshare_buffer
.code
= 0;
598 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_UNSHARE_BUFFER
);
600 timeout
= FJES_COMMAND_REQ_BUFF_TIMEOUT
* 1000;
601 while ((ret
== FJES_CMD_STATUS_NORMAL
) &&
602 (res_buf
->unshare_buffer
.length
==
603 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN
) &&
604 (res_buf
->unshare_buffer
.code
==
605 FJES_CMD_REQ_RES_CODE_BUSY
) &&
607 msleep(200 + hw
->my_epid
* 20);
608 timeout
-= (200 + hw
->my_epid
* 20);
610 res_buf
->unshare_buffer
.length
= 0;
611 res_buf
->unshare_buffer
.code
= 0;
614 fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_UNSHARE_BUFFER
);
619 if (res_buf
->unshare_buffer
.length
!=
620 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN
) {
622 } else if (ret
== FJES_CMD_STATUS_NORMAL
) {
623 switch (res_buf
->unshare_buffer
.code
) {
624 case FJES_CMD_REQ_RES_CODE_NORMAL
:
626 clear_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
);
628 case FJES_CMD_REQ_RES_CODE_BUSY
:
637 case FJES_CMD_STATUS_UNKNOWN
:
640 case FJES_CMD_STATUS_TIMEOUT
:
643 case FJES_CMD_STATUS_ERROR_PARAM
:
644 case FJES_CMD_STATUS_ERROR_STATUS
:
654 int fjes_hw_raise_interrupt(struct fjes_hw
*hw
, int dest_epid
,
655 enum REG_ICTL_MASK mask
)
657 u32 ig
= mask
| dest_epid
;
659 wr32(XSCT_IG
, cpu_to_le32(ig
));
664 u32
fjes_hw_capture_interrupt_status(struct fjes_hw
*hw
)
668 cur_is
= rd32(XSCT_IS
);
673 void fjes_hw_set_irqmask(struct fjes_hw
*hw
,
674 enum REG_ICTL_MASK intr_mask
, bool mask
)
677 wr32(XSCT_IMS
, intr_mask
);
679 wr32(XSCT_IMC
, intr_mask
);
682 bool fjes_hw_epid_is_same_zone(struct fjes_hw
*hw
, int epid
)
684 if (epid
>= hw
->max_epid
)
687 if ((hw
->ep_shm_info
[epid
].es_status
!=
688 FJES_ZONING_STATUS_ENABLE
) ||
689 (hw
->ep_shm_info
[hw
->my_epid
].zone
==
690 FJES_ZONING_ZONE_TYPE_NONE
))
693 return (hw
->ep_shm_info
[epid
].zone
==
694 hw
->ep_shm_info
[hw
->my_epid
].zone
);
697 int fjes_hw_epid_is_shared(struct fjes_device_shared_info
*share
,
702 if (dest_epid
< share
->epnum
)
703 value
= share
->ep_status
[dest_epid
];
708 static bool fjes_hw_epid_is_stop_requested(struct fjes_hw
*hw
, int src_epid
)
710 return test_bit(src_epid
, &hw
->txrx_stop_req_bit
);
713 static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw
*hw
, int src_epid
)
715 return (hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
&
716 FJES_RX_STOP_REQ_DONE
);
719 enum ep_partner_status
720 fjes_hw_get_partner_ep_status(struct fjes_hw
*hw
, int epid
)
722 enum ep_partner_status status
;
724 if (fjes_hw_epid_is_shared(hw
->hw_info
.share
, epid
)) {
725 if (fjes_hw_epid_is_stop_requested(hw
, epid
)) {
726 status
= EP_PARTNER_WAITING
;
728 if (fjes_hw_epid_is_stop_process_done(hw
, epid
))
729 status
= EP_PARTNER_COMPLETE
;
731 status
= EP_PARTNER_SHARED
;
734 status
= EP_PARTNER_UNSHARE
;
740 void fjes_hw_raise_epstop(struct fjes_hw
*hw
)
742 enum ep_partner_status status
;
746 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
747 if (epidx
== hw
->my_epid
)
750 status
= fjes_hw_get_partner_ep_status(hw
, epidx
);
752 case EP_PARTNER_SHARED
:
753 fjes_hw_raise_interrupt(hw
, epidx
,
754 REG_ICTL_MASK_TXRX_STOP_REQ
);
760 set_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
761 set_bit(epidx
, &hw
->txrx_stop_req_bit
);
763 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
764 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
|=
765 FJES_RX_STOP_REQ_REQUEST
;
766 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
770 int fjes_hw_wait_epstop(struct fjes_hw
*hw
)
772 enum ep_partner_status status
;
773 union ep_buffer_info
*info
;
777 while (hw
->hw_info
.buffer_unshare_reserve_bit
&&
778 (wait_time
< FJES_COMMAND_EPSTOP_WAIT_TIMEOUT
* 1000)) {
779 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
780 if (epidx
== hw
->my_epid
)
782 status
= fjes_hw_epid_is_shared(hw
->hw_info
.share
,
784 info
= hw
->ep_shm_info
[epidx
].rx
.info
;
786 (info
->v1i
.rx_status
&
787 FJES_RX_STOP_REQ_DONE
)) &&
789 &hw
->hw_info
.buffer_unshare_reserve_bit
)) {
791 &hw
->hw_info
.buffer_unshare_reserve_bit
);
799 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
800 if (epidx
== hw
->my_epid
)
802 if (test_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
))
804 &hw
->hw_info
.buffer_unshare_reserve_bit
);
807 return (wait_time
< FJES_COMMAND_EPSTOP_WAIT_TIMEOUT
* 1000)
811 bool fjes_hw_check_epbuf_version(struct epbuf_handler
*epbh
, u32 version
)
813 union ep_buffer_info
*info
= epbh
->info
;
815 return (info
->common
.version
== version
);
818 bool fjes_hw_check_mtu(struct epbuf_handler
*epbh
, u32 mtu
)
820 union ep_buffer_info
*info
= epbh
->info
;
822 return ((info
->v1i
.frame_max
== FJES_MTU_TO_FRAME_SIZE(mtu
)) &&
823 info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
);
826 bool fjes_hw_check_vlan_id(struct epbuf_handler
*epbh
, u16 vlan_id
)
828 union ep_buffer_info
*info
= epbh
->info
;
835 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++) {
836 if (vlan_id
== info
->v1i
.vlan_id
[i
]) {
845 bool fjes_hw_set_vlan_id(struct epbuf_handler
*epbh
, u16 vlan_id
)
847 union ep_buffer_info
*info
= epbh
->info
;
850 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++) {
851 if (info
->v1i
.vlan_id
[i
] == 0) {
852 info
->v1i
.vlan_id
[i
] = vlan_id
;
859 void fjes_hw_del_vlan_id(struct epbuf_handler
*epbh
, u16 vlan_id
)
861 union ep_buffer_info
*info
= epbh
->info
;
865 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++) {
866 if (vlan_id
== info
->v1i
.vlan_id
[i
])
867 info
->v1i
.vlan_id
[i
] = 0;
872 bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler
*epbh
)
874 union ep_buffer_info
*info
= epbh
->info
;
876 if (!(info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
))
879 if (info
->v1i
.count_max
== 0)
882 return EP_RING_EMPTY(info
->v1i
.head
, info
->v1i
.tail
,
883 info
->v1i
.count_max
);
886 void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler
*epbh
,
889 union ep_buffer_info
*info
= epbh
->info
;
890 struct esmem_frame
*ring_frame
;
893 ring_frame
= (struct esmem_frame
*)&(epbh
->ring
[EP_RING_INDEX
895 info
->v1i
.count_max
) *
896 info
->v1i
.frame_max
]);
898 *psize
= (size_t)ring_frame
->frame_size
;
900 frame
= ring_frame
->frame_data
;
905 void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler
*epbh
)
907 union ep_buffer_info
*info
= epbh
->info
;
909 if (fjes_hw_epbuf_rx_is_empty(epbh
))
912 EP_RING_INDEX_INC(epbh
->info
->v1i
.head
, info
->v1i
.count_max
);
915 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler
*epbh
,
916 void *frame
, size_t size
)
918 union ep_buffer_info
*info
= epbh
->info
;
919 struct esmem_frame
*ring_frame
;
921 if (EP_RING_FULL(info
->v1i
.head
, info
->v1i
.tail
, info
->v1i
.count_max
))
924 ring_frame
= (struct esmem_frame
*)&(epbh
->ring
[EP_RING_INDEX
926 info
->v1i
.count_max
) *
927 info
->v1i
.frame_max
]);
929 ring_frame
->frame_size
= size
;
930 memcpy((void *)(ring_frame
->frame_data
), (void *)frame
, size
);
932 EP_RING_INDEX_INC(epbh
->info
->v1i
.tail
, info
->v1i
.count_max
);
937 static void fjes_hw_update_zone_task(struct work_struct
*work
)
939 struct fjes_hw
*hw
= container_of(work
,
940 struct fjes_hw
, update_zone_task
);
942 struct my_s
{u8 es_status
; u8 zone
; } *info
;
943 union fjes_device_command_res
*res_buf
;
944 enum ep_partner_status pstatus
;
946 struct fjes_adapter
*adapter
;
947 struct net_device
*netdev
;
950 ulong unshare_bit
= 0;
957 adapter
= (struct fjes_adapter
*)hw
->back
;
958 netdev
= adapter
->netdev
;
959 res_buf
= hw
->hw_info
.res_buf
;
960 info
= (struct my_s
*)&res_buf
->info
.info
;
962 mutex_lock(&hw
->hw_info
.lock
);
964 ret
= fjes_hw_request_info(hw
);
969 if (!work_pending(&adapter
->force_close_task
)) {
970 adapter
->force_reset
= true;
971 schedule_work(&adapter
->force_close_task
);
977 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
978 if (epidx
== hw
->my_epid
) {
979 hw
->ep_shm_info
[epidx
].es_status
=
980 info
[epidx
].es_status
;
981 hw
->ep_shm_info
[epidx
].zone
=
986 pstatus
= fjes_hw_get_partner_ep_status(hw
, epidx
);
988 case EP_PARTNER_UNSHARE
:
990 if ((info
[epidx
].zone
!=
991 FJES_ZONING_ZONE_TYPE_NONE
) &&
992 (info
[epidx
].es_status
==
993 FJES_ZONING_STATUS_ENABLE
) &&
995 info
[hw
->my_epid
].zone
))
996 set_bit(epidx
, &share_bit
);
998 set_bit(epidx
, &unshare_bit
);
1001 case EP_PARTNER_COMPLETE
:
1002 case EP_PARTNER_WAITING
:
1003 if ((info
[epidx
].zone
==
1004 FJES_ZONING_ZONE_TYPE_NONE
) ||
1005 (info
[epidx
].es_status
!=
1006 FJES_ZONING_STATUS_ENABLE
) ||
1007 (info
[epidx
].zone
!=
1008 info
[hw
->my_epid
].zone
)) {
1010 &adapter
->unshare_watch_bitmask
);
1012 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1016 case EP_PARTNER_SHARED
:
1017 if ((info
[epidx
].zone
==
1018 FJES_ZONING_ZONE_TYPE_NONE
) ||
1019 (info
[epidx
].es_status
!=
1020 FJES_ZONING_STATUS_ENABLE
) ||
1021 (info
[epidx
].zone
!=
1022 info
[hw
->my_epid
].zone
))
1023 set_bit(epidx
, &irq_bit
);
1027 hw
->ep_shm_info
[epidx
].es_status
=
1028 info
[epidx
].es_status
;
1029 hw
->ep_shm_info
[epidx
].zone
= info
[epidx
].zone
;
1034 mutex_unlock(&hw
->hw_info
.lock
);
1036 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1037 if (epidx
== hw
->my_epid
)
1040 if (test_bit(epidx
, &share_bit
)) {
1041 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1042 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
1043 netdev
->dev_addr
, netdev
->mtu
);
1044 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1046 mutex_lock(&hw
->hw_info
.lock
);
1048 ret
= fjes_hw_register_buff_addr(
1049 hw
, epidx
, &hw
->ep_shm_info
[epidx
]);
1057 if (!work_pending(&adapter
->force_close_task
)) {
1058 adapter
->force_reset
= true;
1060 &adapter
->force_close_task
);
1064 mutex_unlock(&hw
->hw_info
.lock
);
1067 if (test_bit(epidx
, &unshare_bit
)) {
1068 mutex_lock(&hw
->hw_info
.lock
);
1070 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1078 if (!work_pending(&adapter
->force_close_task
)) {
1079 adapter
->force_reset
= true;
1081 &adapter
->force_close_task
);
1086 mutex_unlock(&hw
->hw_info
.lock
);
1089 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1090 fjes_hw_setup_epbuf(
1091 &hw
->ep_shm_info
[epidx
].tx
,
1092 netdev
->dev_addr
, netdev
->mtu
);
1093 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1098 if (test_bit(epidx
, &irq_bit
)) {
1099 fjes_hw_raise_interrupt(hw
, epidx
,
1100 REG_ICTL_MASK_TXRX_STOP_REQ
);
1102 set_bit(epidx
, &hw
->txrx_stop_req_bit
);
1103 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1104 hw
->ep_shm_info
[epidx
].tx
.
1105 info
->v1i
.rx_status
|=
1106 FJES_RX_STOP_REQ_REQUEST
;
1107 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1108 set_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
1112 if (irq_bit
|| adapter
->unshare_watch_bitmask
) {
1113 if (!work_pending(&adapter
->unshare_watch_task
))
1114 queue_work(adapter
->control_wq
,
1115 &adapter
->unshare_watch_task
);
1119 static void fjes_hw_epstop_task(struct work_struct
*work
)
1121 struct fjes_hw
*hw
= container_of(work
, struct fjes_hw
, epstop_task
);
1122 struct fjes_adapter
*adapter
= (struct fjes_adapter
*)hw
->back
;
1123 unsigned long flags
;
1128 while ((remain_bit
= hw
->epstop_req_bit
)) {
1129 for (epid_bit
= 0; remain_bit
; remain_bit
>>= 1, epid_bit
++) {
1130 if (remain_bit
& 1) {
1131 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1132 hw
->ep_shm_info
[epid_bit
].
1133 tx
.info
->v1i
.rx_status
|=
1134 FJES_RX_STOP_REQ_DONE
;
1135 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1138 clear_bit(epid_bit
, &hw
->epstop_req_bit
);
1140 &adapter
->unshare_watch_bitmask
);
1142 if (!work_pending(&adapter
->unshare_watch_task
))
1144 adapter
->control_wq
,
1145 &adapter
->unshare_watch_task
);