/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */
#include "fjes_hw.h"
#include "fjes.h"
#include "fjes_trace.h"
static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);
/* supported MTU list */
const u32 fjes_support_mtu[] = {
	FJES_MTU_DEFINE(8 * 1024),
	FJES_MTU_DEFINE(16 * 1024),
	FJES_MTU_DEFINE(32 * 1024),
	FJES_MTU_DEFINE(64 * 1024),
	0
};
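/* Read a 32-bit value from a memory-mapped device register. */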
u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
	u8 *base = hw->base;
	u32 value = 0;

	value = readl(&base[reg]);

	return value;
}
static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
	u8 *base;

	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
				fjes_driver_name)) {
		pr_err("request_mem_region failed\n");
		return NULL;
	}

	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);

	return base;
}
static void fjes_hw_iounmap(struct fjes_hw *hw)
{
	iounmap(hw->base);
	release_mem_region(hw->hw_res.start, hw->hw_res.size);
}
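/*
 * Reset the device: set DCTL.reset, then poll until the hardware clears
 * the bit or FJES_DEVICE_RESET_TIMEOUT (in seconds) elapses.
 */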
int fjes_hw_reset(struct fjes_hw *hw)
{
	union REG_DCTL dctl;
	int timeout;

	dctl.reg = 0;
	dctl.bits.reset = 1;

	wr32(XSCT_DCTL, dctl.reg);

	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
	dctl.reg = rd32(XSCT_DCTL);
	while ((dctl.bits.reset == 1) && (timeout > 0)) {
		msleep(1000);
		dctl.reg = rd32(XSCT_DCTL);
		timeout -= 1000;
	}

	return timeout > 0 ? 0 : -EIO;
}
static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
	union REG_MAX_EP info;

	info.reg = rd32(XSCT_MAX_EP);

	return info.bits.maxep;
}
static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
	union REG_OWNER_EPID info;

	info.reg = rd32(XSCT_OWNER_EPID);

	return info.bits.epid;
}
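/*
 * The shared status region is a fjes_device_shared_info header followed
 * by one status byte per endpoint, so its size scales with max_epid.
 */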
static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
	size_t size;

	size = sizeof(struct fjes_device_shared_info) +
	       (sizeof(u8) * hw->max_epid);
	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
	if (!hw->hw_info.share)
		return -ENOMEM;

	hw->hw_info.share->epnum = hw->max_epid;

	return 0;
}
static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
	kfree(hw->hw_info.share);
	hw->hw_info.share = NULL;
}
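/*
 * An endpoint buffer is a single vzalloc'ed region: a union ep_buffer_info
 * header immediately followed by the frame ring.
 */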
static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
	void *mem;

	mem = vzalloc(EP_BUFFER_SIZE);
	if (!mem)
		return -ENOMEM;

	epbh->buffer = mem;
	epbh->size = EP_BUFFER_SIZE;

	epbh->info = (union ep_buffer_info *)mem;
	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));

	return 0;
}
static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
	vfree(epbh->buffer);
	epbh->buffer = NULL;
	epbh->size = 0;

	epbh->info = NULL;
	epbh->ring = NULL;
}
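/*
 * (Re)initialize an endpoint buffer header for the given MAC address and
 * MTU. The VLAN ID table is saved and restored around the memset so that
 * reinitialization (e.g. on an MTU change) keeps the VLAN configuration.
 */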
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;
	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		vlan_id[i] = info->v1i.vlan_id[i];

	memset(info, 0, sizeof(union ep_buffer_info));

	info->v1i.version = 0;  /* version 0 */

	for (i = 0; i < ETH_ALEN; i++)
		info->v1i.mac_addr[i] = mac_addr[i];

	info->v1i.head = 0;
	info->v1i.tail = 1;

	info->v1i.info_size = sizeof(union ep_buffer_info);
	info->v1i.buffer_size = epbh->size - info->v1i.info_size;

	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
	info->v1i.count_max =
	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		info->v1i.vlan_id[i] = vlan_id[i];

	info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}
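/*
 * Program the command interface: buffer lengths, then the 64-bit physical
 * addresses of the request/response buffers and the shared status region,
 * each written as separate low and high 32-bit halves.
 */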
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
			       struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}
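/*
 * Allocate the per-endpoint shared-memory table, the command
 * request/response buffers and the shared status region, initialize every
 * peer's TX/RX buffer pair, and hand the physical addresses to the device.
 */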
static int fjes_hw_setup(struct fjes_hw *hw)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	unsigned long flags;
	size_t mem_size;
	int result;
	int epidx;
	void *buf;

	hw->hw_info.max_epid = &hw->max_epid;
	hw->hw_info.my_epid = &hw->my_epid;

	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hw->ep_shm_info = (struct ep_share_mem_info *)buf;

	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.req_buf))
		return -ENOMEM;

	hw->hw_info.req_buf_size = mem_size;

	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.res_buf))
		return -ENOMEM;

	hw->hw_info.res_buf_size = mem_size;

	result = fjes_hw_alloc_shared_status_region(hw);
	if (result)
		return result;

	hw->hw_info.buffer_share_bit = 0;
	hw->hw_info.buffer_unshare_reserve_bit = 0;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx != hw->my_epid) {
			buf_pair = &hw->ep_shm_info[epidx];

			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
			if (result)
				return result;

			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
			if (result)
				return result;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
					    fjes_support_mtu[0]);
			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
					    fjes_support_mtu[0]);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}
	}

	memset(&param, 0, sizeof(param));

	param.req_len = hw->hw_info.req_buf_size;
	param.req_start = __pa(hw->hw_info.req_buf);
	param.res_len = hw->hw_info.res_buf_size;
	param.res_start = __pa(hw->hw_info.res_buf);

	param.share_start = __pa(hw->hw_info.share->ep_status);

	fjes_hw_init_command_registers(hw, &param);

	return 0;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)
{
	int epidx;

	if (!hw->ep_shm_info)
		return;

	fjes_hw_free_shared_status_region(hw);

	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;

	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}

	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
}
int fjes_hw_init(struct fjes_hw *hw)
{
	int ret;

	hw->base = fjes_hw_iomap(hw);
	if (!hw->base)
		return -EIO;

	ret = fjes_hw_reset(hw);
	if (ret)
		return ret;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);

	mutex_init(&hw->hw_info.lock);
	spin_lock_init(&hw->rx_status_lock);

	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
		return -ENXIO;

	ret = fjes_hw_setup(hw);

	hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
	hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;

	return ret;
}
void fjes_hw_exit(struct fjes_hw *hw)
{
	int ret;

	if (hw->base) {

		if (hw->debug_mode) {
			/* disable debug mode */
			mutex_lock(&hw->hw_info.lock);
			fjes_hw_stop_debug(hw);
			mutex_unlock(&hw->hw_info.lock);
		}
		vfree(hw->hw_info.trace);
		hw->hw_info.trace = NULL;
		hw->hw_info.trace_size = 0;
		hw->debug_mode = 0;

		ret = fjes_hw_reset(hw);
		if (ret)
			pr_err("%s: reset error", __func__);

		fjes_hw_iounmap(hw);
		hw->base = NULL;
	}

	fjes_hw_cleanup(hw);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);
}
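/*
 * Issue one command through the CR/CS register pair: write the request
 * code to CR, then poll CS.complete until the device acknowledges or the
 * request times out; CR.err_info values are mapped onto
 * fjes_dev_command_response_e codes.
 */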
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
			      enum fjes_dev_command_request_type type)
{
	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
	union REG_CR cr;
	union REG_CS cs;
	int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;

	cr.reg = 0;
	cr.bits.req_start = 1;
	cr.bits.req_code = type;
	wr32(XSCT_CR, cr.reg);
	cr.reg = rd32(XSCT_CR);

	if (cr.bits.error == 0) {
		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
		cs.reg = rd32(XSCT_CS);

		while ((cs.bits.complete != 1) && timeout > 0) {
			msleep(1000);
			cs.reg = rd32(XSCT_CS);
			timeout -= 1000;
		}

		if (cs.bits.complete == 1)
			ret = FJES_CMD_STATUS_NORMAL;
		else if (timeout <= 0)
			ret = FJES_CMD_STATUS_TIMEOUT;

	} else {
		switch (cr.bits.err_info) {
		case FJES_CMD_REQ_ERR_INFO_PARAM:
			ret = FJES_CMD_STATUS_ERROR_PARAM;
			break;
		case FJES_CMD_REQ_ERR_INFO_STATUS:
			ret = FJES_CMD_STATUS_ERROR_STATUS;
			break;
		default:
			ret = FJES_CMD_STATUS_UNKNOWN;
			break;
		}
	}

	trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);

	return ret;
}
int fjes_hw_request_info(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;

	res_buf->info.length = 0;
	res_buf->info.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
	trace_fjes_hw_request_info(hw, res_buf);

	result = 0;

	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
		res_buf->info.length) {
		trace_fjes_hw_request_info_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->info.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_request_info_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_ERROR_STATUS:
			result = -EPERM;
			break;
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
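/*
 * Share this pair's TX/RX buffers with a peer endpoint. The request lists
 * each buffer's size followed by the physical address of every
 * EP_BUFFER_INFO_SIZE chunk. While the peer answers BUSY the command is
 * retried, with the delay staggered by my_epid (presumably so endpoints
 * do not retry in lockstep).
 */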
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
			       struct ep_share_mem_info *buf_pair)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int timeout;
	int i, idx;
	void *addr;
	int result;

	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
						buf_pair->tx.size,
						buf_pair->rx.size);
	req_buf->share_buffer.epid = dest_epid;

	idx = 0;
	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->tx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->rx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	res_buf->share_buffer.length = 0;
	res_buf->share_buffer.code = 0;

	trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->share_buffer.length ==
		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
			msleep(200 + hw->my_epid * 20);
			timeout -= (200 + hw->my_epid * 20);

			res_buf->share_buffer.length = 0;
			res_buf->share_buffer.code = 0;

			ret = fjes_hw_issue_request_command(
					hw, FJES_CMD_REQ_SHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_register_buff_addr(res_buf, timeout);

	if (res_buf->share_buffer.length !=
			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->share_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_register_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_register_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	struct fjes_device_shared_info *share = hw->hw_info.share;
	enum fjes_dev_command_response_e ret;
	int timeout;
	int result;

	if (!req_buf || !res_buf || !share)
		return -EPERM;

	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->unshare_buffer.length =
			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
	req_buf->unshare_buffer.epid = dest_epid;

	res_buf->unshare_buffer.length = 0;
	res_buf->unshare_buffer.code = 0;

	trace_fjes_hw_unregister_buff_addr_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->unshare_buffer.length ==
		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
	       (res_buf->unshare_buffer.code ==
		FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->unshare_buffer.length = 0;
		res_buf->unshare_buffer.code = 0;

		ret =
		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_unregister_buff_addr(res_buf, timeout);

	if (res_buf->unshare_buffer.length !=
			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->unshare_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_unregister_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
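/* Raise an interrupt at the destination endpoint via the IG register. */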
int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
			    enum REG_ICTL_MASK mask)
{
	u32 ig = mask | dest_epid;

	wr32(XSCT_IG, cpu_to_le32(ig));

	return 0;
}
u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
	u32 cur_is;

	cur_is = rd32(XSCT_IS);

	return cur_is;
}
void fjes_hw_set_irqmask(struct fjes_hw *hw,
			 enum REG_ICTL_MASK intr_mask, bool mask)
{
	if (mask)
		wr32(XSCT_IMS, intr_mask);
	else
		wr32(XSCT_IMC, intr_mask);
}
bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
	if (epid >= hw->max_epid)
		return false;

	if ((hw->ep_shm_info[epid].es_status !=
			FJES_ZONING_STATUS_ENABLE) ||
		(hw->ep_shm_info[hw->my_epid].zone ==
			FJES_ZONING_ZONE_TYPE_NONE))
		return false;
	else
		return (hw->ep_shm_info[epid].zone ==
				hw->ep_shm_info[hw->my_epid].zone);
}
int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
			   int dest_epid)
{
	int value = false;

	if (dest_epid < share->epnum)
		value = share->ep_status[dest_epid];

	return value;
}
static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
	return test_bit(src_epid, &hw->txrx_stop_req_bit);
}
static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
			FJES_RX_STOP_REQ_DONE);
}
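/*
 * Classify a peer endpoint: UNSHARE if its buffers are not shared with
 * us, WAITING while a TX/RX stop request toward it is outstanding,
 * COMPLETE once the local stop processing for it has finished, and
 * SHARED in the normal case.
 */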
enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
	enum ep_partner_status status;

	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
			status = EP_PARTNER_WAITING;
		} else {
			if (fjes_hw_epid_is_stop_process_done(hw, epid))
				status = EP_PARTNER_COMPLETE;
			else
				status = EP_PARTNER_SHARED;
		}
	} else {
		status = EP_PARTNER_UNSHARE;
	}

	return status;
}
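/*
 * Request a TX/RX stop from every shared peer: raise the stop-request
 * interrupt, record the request in the local bookkeeping bits, and flag
 * it in the peer-visible rx_status word.
 */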
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	unsigned long flags;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		status = fjes_hw_get_partner_ep_status(hw, epidx);
		switch (status) {
		case EP_PARTNER_SHARED:
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);
			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
			break;
		default:
			break;
		}

		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		set_bit(epidx, &hw->txrx_stop_req_bit);

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
			FJES_RX_STOP_REQ_REQUEST;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
	}
}
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	union ep_buffer_info *info;
	int wait_time = 0;
	int epidx;

	while (hw->hw_info.buffer_unshare_reserve_bit &&
	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			status = fjes_hw_epid_is_shared(hw->hw_info.share,
							epidx);
			info = hw->ep_shm_info[epidx].rx.info;
			if ((!status ||
			     (info->v1i.rx_status &
			      FJES_RX_STOP_REQ_DONE)) &&
			    test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}
		}

		msleep(100);
		wait_time += 100;
	}

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
	}

	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
			? 0 : -EBUSY;
}
bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
{
	union ep_buffer_info *info = epbh->info;

	return (info->common.version == version);
}
bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;

	return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
		info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}
bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	bool ret = false;
	int i;

	if (vlan_id == 0) {
		ret = true;
	} else {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i]) {
				ret = true;
				break;
			}
		}
	}
	return ret;
}
bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
		if (info->v1i.vlan_id[i] == 0) {
			info->v1i.vlan_id[i] = vlan_id;
			return true;
		}
	}
	return false;
}
void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	if (0 != vlan_id) {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i])
				info->v1i.vlan_id[i] = 0;
		}
	}
}
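/*
 * RX ring helpers. The ring also reads as empty while an MTU change is in
 * flight, because frame_max/count_max are not yet stable.
 */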
bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
		return true;

	if (info->v1i.count_max == 0)
		return true;

	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
			     info->v1i.count_max);
}
void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
				       size_t *psize)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;
	void *frame;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
							(info->v1i.head,
							 info->v1i.count_max) *
							 info->v1i.frame_max]);

	*psize = (size_t)ring_frame->frame_size;

	frame = ring_frame->frame_data;

	return frame;
}
void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (fjes_hw_epbuf_rx_is_empty(epbh))
		return;

	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
}
int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
			      void *frame, size_t size)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;

	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
		return -ENOBUFS;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
							(info->v1i.tail - 1,
							 info->v1i.count_max) *
							 info->v1i.frame_max]);

	ring_frame->frame_size = size;
	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);

	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);

	return 0;
}
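/*
 * Worker that re-reads zoning information from the device and reconciles
 * it with the sharing state: peers that newly match our zone get their
 * buffers registered, departed peers are unregistered, and shared peers
 * that left the zone are sent a TX/RX stop request. Command failures
 * schedule force_close_task to reset the interface.
 */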
static void fjes_hw_update_zone_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work,
			struct fjes_hw, update_zone_task);

	struct my_s {u8 es_status; u8 zone; } *info;
	union fjes_device_command_res *res_buf;
	enum ep_partner_status pstatus;

	struct fjes_adapter *adapter;
	struct net_device *netdev;
	unsigned long flags;

	ulong unshare_bit = 0;
	ulong share_bit = 0;
	ulong irq_bit = 0;

	int epidx;
	int ret;

	adapter = (struct fjes_adapter *)hw->back;
	netdev = adapter->netdev;
	res_buf = hw->hw_info.res_buf;
	info = (struct my_s *)&res_buf->info.info;

	mutex_lock(&hw->hw_info.lock);

	ret = fjes_hw_request_info(hw);
	switch (ret) {
	case -ENOMSG:
	case -EBUSY:
	default:
		if (!work_pending(&adapter->force_close_task)) {
			adapter->force_reset = true;
			schedule_work(&adapter->force_close_task);
		}
		break;

	case 0:

		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid) {
				hw->ep_shm_info[epidx].es_status =
					info[epidx].es_status;
				hw->ep_shm_info[epidx].zone =
					info[epidx].zone;
				continue;
			}

			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
			switch (pstatus) {
			case EP_PARTNER_UNSHARE:
			default:
				if ((info[epidx].zone !=
					FJES_ZONING_ZONE_TYPE_NONE) &&
				    (info[epidx].es_status ==
					FJES_ZONING_STATUS_ENABLE) &&
				    (info[epidx].zone ==
					info[hw->my_epid].zone))
					set_bit(epidx, &share_bit);
				else
					set_bit(epidx, &unshare_bit);
				break;

			case EP_PARTNER_COMPLETE:
			case EP_PARTNER_WAITING:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone)) {
					set_bit(epidx,
						&adapter->unshare_watch_bitmask);
					set_bit(epidx,
						&hw->hw_info.buffer_unshare_reserve_bit);
				}
				break;

			case EP_PARTNER_SHARED:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone))
					set_bit(epidx, &irq_bit);
				break;
			}

			hw->ep_shm_info[epidx].es_status =
				info[epidx].es_status;
			hw->ep_shm_info[epidx].zone = info[epidx].zone;
		}
		break;
	}

	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (test_bit(epidx, &share_bit)) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_register_buff_addr(
				hw, epidx, &hw->ep_shm_info[epidx]);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_regist_buf_exec += 1;
		}

		if (test_bit(epidx, &unshare_bit)) {
			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_unregister_buff_addr(hw, epidx);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}

			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_unregist_buf_exec += 1;

			if (ret == 0) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}

		if (test_bit(epidx, &irq_bit)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);

			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;

			set_bit(epidx, &hw->txrx_stop_req_bit);
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[epidx].tx.
				info->v1i.rx_status |=
					FJES_RX_STOP_REQ_REQUEST;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
			set_bit(epidx,
				&hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	if (irq_bit || adapter->unshare_watch_bitmask) {
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	}
}
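/*
 * Worker that acknowledges incoming epstop requests: set
 * FJES_RX_STOP_REQ_DONE in the peer-visible rx_status, clear the request
 * bit, and kick the unshare watch task.
 */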
static void fjes_hw_epstop_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
	unsigned long flags;

	ulong remain_bit;
	int epid_bit;

	while ((remain_bit = hw->epstop_req_bit)) {
		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
			if (remain_bit & 1) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epid_bit].
					tx.info->v1i.rx_status |=
						FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epid_bit, &hw->epstop_req_bit);
				set_bit(epid_bit,
					&adapter->unshare_watch_bitmask);

				if (!work_pending(&adapter->unshare_watch_task))
					queue_work(
						adapter->control_wq,
						&adapter->unshare_watch_task);
			}
		}
	}
}
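/*
 * Start the firmware trace: the request carries the physical address of
 * every FJES_DEBUG_PAGE_SIZE page of the vzalloc'ed trace buffer.
 */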
int fjes_hw_start_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int result = 0;
	void *addr;
	int i;

	if (!hw->hw_info.trace)
		return -EPERM;
	memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->start_trace.length =
		FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
	req_buf->start_trace.mode = hw->debug_mode;
	req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
	page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
		req_buf->start_trace.buffer[i] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
				offset_in_page(addr));
	}

	res_buf->start_trace.length = 0;
	res_buf->start_trace.code = 0;

	trace_fjes_hw_start_debug_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
	trace_fjes_hw_start_debug(res_buf);

	if (res_buf->start_trace.length !=
		FJES_DEV_COMMAND_START_DBG_RES_LEN) {
		result = -ENOMSG;
		trace_fjes_hw_start_debug_err("Invalid res_buf");
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->start_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_start_debug_err("Busy Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
int fjes_hw_stop_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result = 0;

	if (!hw->hw_info.trace)
		return -EPERM;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);
	req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;

	res_buf->stop_trace.length = 0;
	res_buf->stop_trace.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
	trace_fjes_hw_stop_debug(res_buf);

	if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
		trace_fjes_hw_stop_debug_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->stop_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			hw->debug_mode = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			trace_fjes_hw_stop_debug_err("Busy Timeout");
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}