/* drivers/net/fjes/fjes_hw.c */
/*
 * FUJITSU Extended Socket Network Device driver
 * Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include "fjes_hw.h"
#include "fjes.h"
#include "fjes_trace.h"
26 static void fjes_hw_update_zone_task(struct work_struct *);
27 static void fjes_hw_epstop_task(struct work_struct *);
29 /* supported MTU list */
30 const u32 fjes_support_mtu[] = {
31 FJES_MTU_DEFINE(8 * 1024),
32 FJES_MTU_DEFINE(16 * 1024),
33 FJES_MTU_DEFINE(32 * 1024),
34 FJES_MTU_DEFINE(64 * 1024),
38 u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
40 u8 *base = hw->base;
41 u32 value = 0;
43 value = readl(&base[reg]);
45 return value;
48 static u8 *fjes_hw_iomap(struct fjes_hw *hw)
50 u8 *base;
52 if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
53 fjes_driver_name)) {
54 pr_err("request_mem_region failed\n");
55 return NULL;
58 base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
60 return base;
63 static void fjes_hw_iounmap(struct fjes_hw *hw)
65 iounmap(hw->base);
66 release_mem_region(hw->hw_res.start, hw->hw_res.size);
69 int fjes_hw_reset(struct fjes_hw *hw)
71 union REG_DCTL dctl;
72 int timeout;
74 dctl.reg = 0;
75 dctl.bits.reset = 1;
76 wr32(XSCT_DCTL, dctl.reg);
78 timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
79 dctl.reg = rd32(XSCT_DCTL);
80 while ((dctl.bits.reset == 1) && (timeout > 0)) {
81 msleep(1000);
82 dctl.reg = rd32(XSCT_DCTL);
83 timeout -= 1000;
86 return timeout > 0 ? 0 : -EIO;
89 static int fjes_hw_get_max_epid(struct fjes_hw *hw)
91 union REG_MAX_EP info;
93 info.reg = rd32(XSCT_MAX_EP);
95 return info.bits.maxep;
98 static int fjes_hw_get_my_epid(struct fjes_hw *hw)
100 union REG_OWNER_EPID info;
102 info.reg = rd32(XSCT_OWNER_EPID);
104 return info.bits.epid;
107 static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
109 size_t size;
111 size = sizeof(struct fjes_device_shared_info) +
112 (sizeof(u8) * hw->max_epid);
113 hw->hw_info.share = kzalloc(size, GFP_KERNEL);
114 if (!hw->hw_info.share)
115 return -ENOMEM;
117 hw->hw_info.share->epnum = hw->max_epid;
119 return 0;
122 static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
124 kfree(hw->hw_info.share);
125 hw->hw_info.share = NULL;
128 static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
130 void *mem;
132 mem = vzalloc(EP_BUFFER_SIZE);
133 if (!mem)
134 return -ENOMEM;
136 epbh->buffer = mem;
137 epbh->size = EP_BUFFER_SIZE;
139 epbh->info = (union ep_buffer_info *)mem;
140 epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
142 return 0;
145 static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
147 vfree(epbh->buffer);
148 epbh->buffer = NULL;
149 epbh->size = 0;
151 epbh->info = NULL;
152 epbh->ring = NULL;
155 void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
157 union ep_buffer_info *info = epbh->info;
158 u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
159 int i;
161 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
162 vlan_id[i] = info->v1i.vlan_id[i];
164 memset(info, 0, sizeof(union ep_buffer_info));
166 info->v1i.version = 0; /* version 0 */
168 for (i = 0; i < ETH_ALEN; i++)
169 info->v1i.mac_addr[i] = mac_addr[i];
171 info->v1i.head = 0;
172 info->v1i.tail = 1;
174 info->v1i.info_size = sizeof(union ep_buffer_info);
175 info->v1i.buffer_size = epbh->size - info->v1i.info_size;
177 info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
178 info->v1i.count_max =
179 EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
181 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
182 info->v1i.vlan_id[i] = vlan_id[i];
184 info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
187 void
188 fjes_hw_init_command_registers(struct fjes_hw *hw,
189 struct fjes_device_command_param *param)
191 /* Request Buffer length */
192 wr32(XSCT_REQBL, (__le32)(param->req_len));
193 /* Response Buffer Length */
194 wr32(XSCT_RESPBL, (__le32)(param->res_len));
196 /* Request Buffer Address */
197 wr32(XSCT_REQBAL,
198 (__le32)(param->req_start & GENMASK_ULL(31, 0)));
199 wr32(XSCT_REQBAH,
200 (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
202 /* Response Buffer Address */
203 wr32(XSCT_RESPBAL,
204 (__le32)(param->res_start & GENMASK_ULL(31, 0)));
205 wr32(XSCT_RESPBAH,
206 (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
208 /* Share status address */
209 wr32(XSCT_SHSTSAL,
210 (__le32)(param->share_start & GENMASK_ULL(31, 0)));
211 wr32(XSCT_SHSTSAH,
212 (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
215 static int fjes_hw_setup(struct fjes_hw *hw)
217 u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
218 struct fjes_device_command_param param;
219 struct ep_share_mem_info *buf_pair;
220 unsigned long flags;
221 size_t mem_size;
222 int result;
223 int epidx;
224 void *buf;
226 hw->hw_info.max_epid = &hw->max_epid;
227 hw->hw_info.my_epid = &hw->my_epid;
229 buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
230 GFP_KERNEL);
231 if (!buf)
232 return -ENOMEM;
234 hw->ep_shm_info = (struct ep_share_mem_info *)buf;
236 mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
237 hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
238 if (!(hw->hw_info.req_buf))
239 return -ENOMEM;
241 hw->hw_info.req_buf_size = mem_size;
243 mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
244 hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
245 if (!(hw->hw_info.res_buf))
246 return -ENOMEM;
248 hw->hw_info.res_buf_size = mem_size;
250 result = fjes_hw_alloc_shared_status_region(hw);
251 if (result)
252 return result;
254 hw->hw_info.buffer_share_bit = 0;
255 hw->hw_info.buffer_unshare_reserve_bit = 0;
257 for (epidx = 0; epidx < hw->max_epid; epidx++) {
258 if (epidx != hw->my_epid) {
259 buf_pair = &hw->ep_shm_info[epidx];
261 result = fjes_hw_alloc_epbuf(&buf_pair->tx);
262 if (result)
263 return result;
265 result = fjes_hw_alloc_epbuf(&buf_pair->rx);
266 if (result)
267 return result;
269 spin_lock_irqsave(&hw->rx_status_lock, flags);
270 fjes_hw_setup_epbuf(&buf_pair->tx, mac,
271 fjes_support_mtu[0]);
272 fjes_hw_setup_epbuf(&buf_pair->rx, mac,
273 fjes_support_mtu[0]);
274 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
278 memset(&param, 0, sizeof(param));
280 param.req_len = hw->hw_info.req_buf_size;
281 param.req_start = __pa(hw->hw_info.req_buf);
282 param.res_len = hw->hw_info.res_buf_size;
283 param.res_start = __pa(hw->hw_info.res_buf);
285 param.share_start = __pa(hw->hw_info.share->ep_status);
287 fjes_hw_init_command_registers(hw, &param);
289 return 0;
292 static void fjes_hw_cleanup(struct fjes_hw *hw)
294 int epidx;
296 if (!hw->ep_shm_info)
297 return;
299 fjes_hw_free_shared_status_region(hw);
301 kfree(hw->hw_info.req_buf);
302 hw->hw_info.req_buf = NULL;
304 kfree(hw->hw_info.res_buf);
305 hw->hw_info.res_buf = NULL;
307 for (epidx = 0; epidx < hw->max_epid ; epidx++) {
308 if (epidx == hw->my_epid)
309 continue;
310 fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
311 fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
314 kfree(hw->ep_shm_info);
315 hw->ep_shm_info = NULL;
318 int fjes_hw_init(struct fjes_hw *hw)
320 int ret;
322 hw->base = fjes_hw_iomap(hw);
323 if (!hw->base)
324 return -EIO;
326 ret = fjes_hw_reset(hw);
327 if (ret)
328 return ret;
330 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
332 INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
333 INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
335 mutex_init(&hw->hw_info.lock);
336 spin_lock_init(&hw->rx_status_lock);
338 hw->max_epid = fjes_hw_get_max_epid(hw);
339 hw->my_epid = fjes_hw_get_my_epid(hw);
341 if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
342 return -ENXIO;
344 ret = fjes_hw_setup(hw);
346 hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
347 hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
349 return ret;
352 void fjes_hw_exit(struct fjes_hw *hw)
354 int ret;
356 if (hw->base) {
358 if (hw->debug_mode) {
359 /* disable debug mode */
360 mutex_lock(&hw->hw_info.lock);
361 fjes_hw_stop_debug(hw);
362 mutex_unlock(&hw->hw_info.lock);
364 vfree(hw->hw_info.trace);
365 hw->hw_info.trace = NULL;
366 hw->hw_info.trace_size = 0;
367 hw->debug_mode = 0;
369 ret = fjes_hw_reset(hw);
370 if (ret)
371 pr_err("%s: reset error", __func__);
373 fjes_hw_iounmap(hw);
374 hw->base = NULL;
377 fjes_hw_cleanup(hw);
379 cancel_work_sync(&hw->update_zone_task);
380 cancel_work_sync(&hw->epstop_task);
383 static enum fjes_dev_command_response_e
384 fjes_hw_issue_request_command(struct fjes_hw *hw,
385 enum fjes_dev_command_request_type type)
387 enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
388 union REG_CR cr;
389 union REG_CS cs;
390 int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
392 cr.reg = 0;
393 cr.bits.req_start = 1;
394 cr.bits.req_code = type;
395 wr32(XSCT_CR, cr.reg);
396 cr.reg = rd32(XSCT_CR);
398 if (cr.bits.error == 0) {
399 timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
400 cs.reg = rd32(XSCT_CS);
402 while ((cs.bits.complete != 1) && timeout > 0) {
403 msleep(1000);
404 cs.reg = rd32(XSCT_CS);
405 timeout -= 1000;
408 if (cs.bits.complete == 1)
409 ret = FJES_CMD_STATUS_NORMAL;
410 else if (timeout <= 0)
411 ret = FJES_CMD_STATUS_TIMEOUT;
413 } else {
414 switch (cr.bits.err_info) {
415 case FJES_CMD_REQ_ERR_INFO_PARAM:
416 ret = FJES_CMD_STATUS_ERROR_PARAM;
417 break;
418 case FJES_CMD_REQ_ERR_INFO_STATUS:
419 ret = FJES_CMD_STATUS_ERROR_STATUS;
420 break;
421 default:
422 ret = FJES_CMD_STATUS_UNKNOWN;
423 break;
427 trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);
429 return ret;
432 int fjes_hw_request_info(struct fjes_hw *hw)
434 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
435 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
436 enum fjes_dev_command_response_e ret;
437 int result;
439 memset(req_buf, 0, hw->hw_info.req_buf_size);
440 memset(res_buf, 0, hw->hw_info.res_buf_size);
442 req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
444 res_buf->info.length = 0;
445 res_buf->info.code = 0;
447 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
448 trace_fjes_hw_request_info(hw, res_buf);
450 result = 0;
452 if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
453 res_buf->info.length) {
454 trace_fjes_hw_request_info_err("Invalid res_buf");
455 result = -ENOMSG;
456 } else if (ret == FJES_CMD_STATUS_NORMAL) {
457 switch (res_buf->info.code) {
458 case FJES_CMD_REQ_RES_CODE_NORMAL:
459 result = 0;
460 break;
461 default:
462 result = -EPERM;
463 break;
465 } else {
466 switch (ret) {
467 case FJES_CMD_STATUS_UNKNOWN:
468 result = -EPERM;
469 break;
470 case FJES_CMD_STATUS_TIMEOUT:
471 trace_fjes_hw_request_info_err("Timeout");
472 result = -EBUSY;
473 break;
474 case FJES_CMD_STATUS_ERROR_PARAM:
475 result = -EPERM;
476 break;
477 case FJES_CMD_STATUS_ERROR_STATUS:
478 result = -EPERM;
479 break;
480 default:
481 result = -EPERM;
482 break;
486 return result;
489 int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
490 struct ep_share_mem_info *buf_pair)
492 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
493 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
494 enum fjes_dev_command_response_e ret;
495 int page_count;
496 int timeout;
497 int i, idx;
498 void *addr;
499 int result;
501 if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
502 return 0;
504 memset(req_buf, 0, hw->hw_info.req_buf_size);
505 memset(res_buf, 0, hw->hw_info.res_buf_size);
507 req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
508 buf_pair->tx.size,
509 buf_pair->rx.size);
510 req_buf->share_buffer.epid = dest_epid;
512 idx = 0;
513 req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
514 page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
515 for (i = 0; i < page_count; i++) {
516 addr = ((u8 *)(buf_pair->tx.buffer)) +
517 (i * EP_BUFFER_INFO_SIZE);
518 req_buf->share_buffer.buffer[idx++] =
519 (__le64)(page_to_phys(vmalloc_to_page(addr)) +
520 offset_in_page(addr));
523 req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
524 page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
525 for (i = 0; i < page_count; i++) {
526 addr = ((u8 *)(buf_pair->rx.buffer)) +
527 (i * EP_BUFFER_INFO_SIZE);
528 req_buf->share_buffer.buffer[idx++] =
529 (__le64)(page_to_phys(vmalloc_to_page(addr)) +
530 offset_in_page(addr));
533 res_buf->share_buffer.length = 0;
534 res_buf->share_buffer.code = 0;
536 trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);
538 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
540 timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
541 while ((ret == FJES_CMD_STATUS_NORMAL) &&
542 (res_buf->share_buffer.length ==
543 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
544 (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
545 (timeout > 0)) {
546 msleep(200 + hw->my_epid * 20);
547 timeout -= (200 + hw->my_epid * 20);
549 res_buf->share_buffer.length = 0;
550 res_buf->share_buffer.code = 0;
552 ret = fjes_hw_issue_request_command(
553 hw, FJES_CMD_REQ_SHARE_BUFFER);
556 result = 0;
558 trace_fjes_hw_register_buff_addr(res_buf, timeout);
560 if (res_buf->share_buffer.length !=
561 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
562 trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
563 result = -ENOMSG;
564 } else if (ret == FJES_CMD_STATUS_NORMAL) {
565 switch (res_buf->share_buffer.code) {
566 case FJES_CMD_REQ_RES_CODE_NORMAL:
567 result = 0;
568 set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
569 break;
570 case FJES_CMD_REQ_RES_CODE_BUSY:
571 trace_fjes_hw_register_buff_addr_err("Busy Timeout");
572 result = -EBUSY;
573 break;
574 default:
575 result = -EPERM;
576 break;
578 } else {
579 switch (ret) {
580 case FJES_CMD_STATUS_UNKNOWN:
581 result = -EPERM;
582 break;
583 case FJES_CMD_STATUS_TIMEOUT:
584 trace_fjes_hw_register_buff_addr_err("Timeout");
585 result = -EBUSY;
586 break;
587 case FJES_CMD_STATUS_ERROR_PARAM:
588 case FJES_CMD_STATUS_ERROR_STATUS:
589 default:
590 result = -EPERM;
591 break;
595 return result;
598 int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
600 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
601 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
602 struct fjes_device_shared_info *share = hw->hw_info.share;
603 enum fjes_dev_command_response_e ret;
604 int timeout;
605 int result;
607 if (!hw->base)
608 return -EPERM;
610 if (!req_buf || !res_buf || !share)
611 return -EPERM;
613 if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
614 return 0;
616 memset(req_buf, 0, hw->hw_info.req_buf_size);
617 memset(res_buf, 0, hw->hw_info.res_buf_size);
619 req_buf->unshare_buffer.length =
620 FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
621 req_buf->unshare_buffer.epid = dest_epid;
623 res_buf->unshare_buffer.length = 0;
624 res_buf->unshare_buffer.code = 0;
626 trace_fjes_hw_unregister_buff_addr_req(req_buf);
627 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
629 timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
630 while ((ret == FJES_CMD_STATUS_NORMAL) &&
631 (res_buf->unshare_buffer.length ==
632 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
633 (res_buf->unshare_buffer.code ==
634 FJES_CMD_REQ_RES_CODE_BUSY) &&
635 (timeout > 0)) {
636 msleep(200 + hw->my_epid * 20);
637 timeout -= (200 + hw->my_epid * 20);
639 res_buf->unshare_buffer.length = 0;
640 res_buf->unshare_buffer.code = 0;
642 ret =
643 fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
646 result = 0;
648 trace_fjes_hw_unregister_buff_addr(res_buf, timeout);
650 if (res_buf->unshare_buffer.length !=
651 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
652 trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
653 result = -ENOMSG;
654 } else if (ret == FJES_CMD_STATUS_NORMAL) {
655 switch (res_buf->unshare_buffer.code) {
656 case FJES_CMD_REQ_RES_CODE_NORMAL:
657 result = 0;
658 clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
659 break;
660 case FJES_CMD_REQ_RES_CODE_BUSY:
661 trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
662 result = -EBUSY;
663 break;
664 default:
665 result = -EPERM;
666 break;
668 } else {
669 switch (ret) {
670 case FJES_CMD_STATUS_UNKNOWN:
671 result = -EPERM;
672 break;
673 case FJES_CMD_STATUS_TIMEOUT:
674 trace_fjes_hw_unregister_buff_addr_err("Timeout");
675 result = -EBUSY;
676 break;
677 case FJES_CMD_STATUS_ERROR_PARAM:
678 case FJES_CMD_STATUS_ERROR_STATUS:
679 default:
680 result = -EPERM;
681 break;
685 return result;
688 int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
689 enum REG_ICTL_MASK mask)
691 u32 ig = mask | dest_epid;
693 wr32(XSCT_IG, cpu_to_le32(ig));
695 return 0;
698 u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
700 u32 cur_is;
702 cur_is = rd32(XSCT_IS);
704 return cur_is;
707 void fjes_hw_set_irqmask(struct fjes_hw *hw,
708 enum REG_ICTL_MASK intr_mask, bool mask)
710 if (mask)
711 wr32(XSCT_IMS, intr_mask);
712 else
713 wr32(XSCT_IMC, intr_mask);
716 bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
718 if (epid >= hw->max_epid)
719 return false;
721 if ((hw->ep_shm_info[epid].es_status !=
722 FJES_ZONING_STATUS_ENABLE) ||
723 (hw->ep_shm_info[hw->my_epid].zone ==
724 FJES_ZONING_ZONE_TYPE_NONE))
725 return false;
726 else
727 return (hw->ep_shm_info[epid].zone ==
728 hw->ep_shm_info[hw->my_epid].zone);
731 int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
732 int dest_epid)
734 int value = false;
736 if (dest_epid < share->epnum)
737 value = share->ep_status[dest_epid];
739 return value;
742 static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
744 return test_bit(src_epid, &hw->txrx_stop_req_bit);
747 static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
749 return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
750 FJES_RX_STOP_REQ_DONE);
753 enum ep_partner_status
754 fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
756 enum ep_partner_status status;
758 if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
759 if (fjes_hw_epid_is_stop_requested(hw, epid)) {
760 status = EP_PARTNER_WAITING;
761 } else {
762 if (fjes_hw_epid_is_stop_process_done(hw, epid))
763 status = EP_PARTNER_COMPLETE;
764 else
765 status = EP_PARTNER_SHARED;
767 } else {
768 status = EP_PARTNER_UNSHARE;
771 return status;
774 void fjes_hw_raise_epstop(struct fjes_hw *hw)
776 enum ep_partner_status status;
777 unsigned long flags;
778 int epidx;
780 for (epidx = 0; epidx < hw->max_epid; epidx++) {
781 if (epidx == hw->my_epid)
782 continue;
784 status = fjes_hw_get_partner_ep_status(hw, epidx);
785 switch (status) {
786 case EP_PARTNER_SHARED:
787 fjes_hw_raise_interrupt(hw, epidx,
788 REG_ICTL_MASK_TXRX_STOP_REQ);
789 hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
790 break;
791 default:
792 break;
795 set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
796 set_bit(epidx, &hw->txrx_stop_req_bit);
798 spin_lock_irqsave(&hw->rx_status_lock, flags);
799 hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
800 FJES_RX_STOP_REQ_REQUEST;
801 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
805 int fjes_hw_wait_epstop(struct fjes_hw *hw)
807 enum ep_partner_status status;
808 union ep_buffer_info *info;
809 int wait_time = 0;
810 int epidx;
812 while (hw->hw_info.buffer_unshare_reserve_bit &&
813 (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
814 for (epidx = 0; epidx < hw->max_epid; epidx++) {
815 if (epidx == hw->my_epid)
816 continue;
817 status = fjes_hw_epid_is_shared(hw->hw_info.share,
818 epidx);
819 info = hw->ep_shm_info[epidx].rx.info;
820 if ((!status ||
821 (info->v1i.rx_status &
822 FJES_RX_STOP_REQ_DONE)) &&
823 test_bit(epidx,
824 &hw->hw_info.buffer_unshare_reserve_bit)) {
825 clear_bit(epidx,
826 &hw->hw_info.buffer_unshare_reserve_bit);
830 msleep(100);
831 wait_time += 100;
834 for (epidx = 0; epidx < hw->max_epid; epidx++) {
835 if (epidx == hw->my_epid)
836 continue;
837 if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
838 clear_bit(epidx,
839 &hw->hw_info.buffer_unshare_reserve_bit);
842 return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
843 ? 0 : -EBUSY;
846 bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
848 union ep_buffer_info *info = epbh->info;
850 return (info->common.version == version);
853 bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
855 union ep_buffer_info *info = epbh->info;
857 return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
858 info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
861 bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
863 union ep_buffer_info *info = epbh->info;
864 bool ret = false;
865 int i;
867 if (vlan_id == 0) {
868 ret = true;
869 } else {
870 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
871 if (vlan_id == info->v1i.vlan_id[i]) {
872 ret = true;
873 break;
877 return ret;
880 bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
882 union ep_buffer_info *info = epbh->info;
883 int i;
885 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
886 if (info->v1i.vlan_id[i] == 0) {
887 info->v1i.vlan_id[i] = vlan_id;
888 return true;
891 return false;
894 void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
896 union ep_buffer_info *info = epbh->info;
897 int i;
899 if (0 != vlan_id) {
900 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
901 if (vlan_id == info->v1i.vlan_id[i])
902 info->v1i.vlan_id[i] = 0;
907 bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
909 union ep_buffer_info *info = epbh->info;
911 if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
912 return true;
914 if (info->v1i.count_max == 0)
915 return true;
917 return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
918 info->v1i.count_max);
921 void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
922 size_t *psize)
924 union ep_buffer_info *info = epbh->info;
925 struct esmem_frame *ring_frame;
926 void *frame;
928 ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
929 (info->v1i.head,
930 info->v1i.count_max) *
931 info->v1i.frame_max]);
933 *psize = (size_t)ring_frame->frame_size;
935 frame = ring_frame->frame_data;
937 return frame;
940 void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
942 union ep_buffer_info *info = epbh->info;
944 if (fjes_hw_epbuf_rx_is_empty(epbh))
945 return;
947 EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
950 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
951 void *frame, size_t size)
953 union ep_buffer_info *info = epbh->info;
954 struct esmem_frame *ring_frame;
956 if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
957 return -ENOBUFS;
959 ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
960 (info->v1i.tail - 1,
961 info->v1i.count_max) *
962 info->v1i.frame_max]);
964 ring_frame->frame_size = size;
965 memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
967 EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
969 return 0;
972 static void fjes_hw_update_zone_task(struct work_struct *work)
974 struct fjes_hw *hw = container_of(work,
975 struct fjes_hw, update_zone_task);
977 struct my_s {u8 es_status; u8 zone; } *info;
978 union fjes_device_command_res *res_buf;
979 enum ep_partner_status pstatus;
981 struct fjes_adapter *adapter;
982 struct net_device *netdev;
983 unsigned long flags;
985 ulong unshare_bit = 0;
986 ulong share_bit = 0;
987 ulong irq_bit = 0;
989 int epidx;
990 int ret;
992 adapter = (struct fjes_adapter *)hw->back;
993 netdev = adapter->netdev;
994 res_buf = hw->hw_info.res_buf;
995 info = (struct my_s *)&res_buf->info.info;
997 mutex_lock(&hw->hw_info.lock);
999 ret = fjes_hw_request_info(hw);
1000 switch (ret) {
1001 case -ENOMSG:
1002 case -EBUSY:
1003 default:
1004 if (!work_pending(&adapter->force_close_task)) {
1005 adapter->force_reset = true;
1006 schedule_work(&adapter->force_close_task);
1008 break;
1010 case 0:
1012 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1013 if (epidx == hw->my_epid) {
1014 hw->ep_shm_info[epidx].es_status =
1015 info[epidx].es_status;
1016 hw->ep_shm_info[epidx].zone =
1017 info[epidx].zone;
1018 continue;
1021 pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
1022 switch (pstatus) {
1023 case EP_PARTNER_UNSHARE:
1024 default:
1025 if ((info[epidx].zone !=
1026 FJES_ZONING_ZONE_TYPE_NONE) &&
1027 (info[epidx].es_status ==
1028 FJES_ZONING_STATUS_ENABLE) &&
1029 (info[epidx].zone ==
1030 info[hw->my_epid].zone))
1031 set_bit(epidx, &share_bit);
1032 else
1033 set_bit(epidx, &unshare_bit);
1034 break;
1036 case EP_PARTNER_COMPLETE:
1037 case EP_PARTNER_WAITING:
1038 if ((info[epidx].zone ==
1039 FJES_ZONING_ZONE_TYPE_NONE) ||
1040 (info[epidx].es_status !=
1041 FJES_ZONING_STATUS_ENABLE) ||
1042 (info[epidx].zone !=
1043 info[hw->my_epid].zone)) {
1044 set_bit(epidx,
1045 &adapter->unshare_watch_bitmask);
1046 set_bit(epidx,
1047 &hw->hw_info.buffer_unshare_reserve_bit);
1049 break;
1051 case EP_PARTNER_SHARED:
1052 if ((info[epidx].zone ==
1053 FJES_ZONING_ZONE_TYPE_NONE) ||
1054 (info[epidx].es_status !=
1055 FJES_ZONING_STATUS_ENABLE) ||
1056 (info[epidx].zone !=
1057 info[hw->my_epid].zone))
1058 set_bit(epidx, &irq_bit);
1059 break;
1062 hw->ep_shm_info[epidx].es_status =
1063 info[epidx].es_status;
1064 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1066 break;
1069 mutex_unlock(&hw->hw_info.lock);
1071 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1072 if (epidx == hw->my_epid)
1073 continue;
1075 if (test_bit(epidx, &share_bit)) {
1076 spin_lock_irqsave(&hw->rx_status_lock, flags);
1077 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1078 netdev->dev_addr, netdev->mtu);
1079 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1081 mutex_lock(&hw->hw_info.lock);
1083 ret = fjes_hw_register_buff_addr(
1084 hw, epidx, &hw->ep_shm_info[epidx]);
1086 switch (ret) {
1087 case 0:
1088 break;
1089 case -ENOMSG:
1090 case -EBUSY:
1091 default:
1092 if (!work_pending(&adapter->force_close_task)) {
1093 adapter->force_reset = true;
1094 schedule_work(
1095 &adapter->force_close_task);
1097 break;
1099 mutex_unlock(&hw->hw_info.lock);
1101 hw->ep_shm_info[epidx].ep_stats
1102 .com_regist_buf_exec += 1;
1105 if (test_bit(epidx, &unshare_bit)) {
1106 mutex_lock(&hw->hw_info.lock);
1108 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1110 switch (ret) {
1111 case 0:
1112 break;
1113 case -ENOMSG:
1114 case -EBUSY:
1115 default:
1116 if (!work_pending(&adapter->force_close_task)) {
1117 adapter->force_reset = true;
1118 schedule_work(
1119 &adapter->force_close_task);
1121 break;
1124 mutex_unlock(&hw->hw_info.lock);
1126 hw->ep_shm_info[epidx].ep_stats
1127 .com_unregist_buf_exec += 1;
1129 if (ret == 0) {
1130 spin_lock_irqsave(&hw->rx_status_lock, flags);
1131 fjes_hw_setup_epbuf(
1132 &hw->ep_shm_info[epidx].tx,
1133 netdev->dev_addr, netdev->mtu);
1134 spin_unlock_irqrestore(&hw->rx_status_lock,
1135 flags);
1139 if (test_bit(epidx, &irq_bit)) {
1140 fjes_hw_raise_interrupt(hw, epidx,
1141 REG_ICTL_MASK_TXRX_STOP_REQ);
1143 hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
1145 set_bit(epidx, &hw->txrx_stop_req_bit);
1146 spin_lock_irqsave(&hw->rx_status_lock, flags);
1147 hw->ep_shm_info[epidx].tx.
1148 info->v1i.rx_status |=
1149 FJES_RX_STOP_REQ_REQUEST;
1150 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1151 set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1155 if (irq_bit || adapter->unshare_watch_bitmask) {
1156 if (!work_pending(&adapter->unshare_watch_task))
1157 queue_work(adapter->control_wq,
1158 &adapter->unshare_watch_task);
1162 static void fjes_hw_epstop_task(struct work_struct *work)
1164 struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
1165 struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
1166 unsigned long flags;
1168 ulong remain_bit;
1169 int epid_bit;
1171 while ((remain_bit = hw->epstop_req_bit)) {
1172 for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
1173 if (remain_bit & 1) {
1174 spin_lock_irqsave(&hw->rx_status_lock, flags);
1175 hw->ep_shm_info[epid_bit].
1176 tx.info->v1i.rx_status |=
1177 FJES_RX_STOP_REQ_DONE;
1178 spin_unlock_irqrestore(&hw->rx_status_lock,
1179 flags);
1181 clear_bit(epid_bit, &hw->epstop_req_bit);
1182 set_bit(epid_bit,
1183 &adapter->unshare_watch_bitmask);
1185 if (!work_pending(&adapter->unshare_watch_task))
1186 queue_work(
1187 adapter->control_wq,
1188 &adapter->unshare_watch_task);
1194 int fjes_hw_start_debug(struct fjes_hw *hw)
1196 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
1197 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
1198 enum fjes_dev_command_response_e ret;
1199 int page_count;
1200 int result = 0;
1201 void *addr;
1202 int i;
1204 if (!hw->hw_info.trace)
1205 return -EPERM;
1206 memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);
1208 memset(req_buf, 0, hw->hw_info.req_buf_size);
1209 memset(res_buf, 0, hw->hw_info.res_buf_size);
1211 req_buf->start_trace.length =
1212 FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
1213 req_buf->start_trace.mode = hw->debug_mode;
1214 req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
1215 page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
1216 for (i = 0; i < page_count; i++) {
1217 addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
1218 req_buf->start_trace.buffer[i] =
1219 (__le64)(page_to_phys(vmalloc_to_page(addr)) +
1220 offset_in_page(addr));
1223 res_buf->start_trace.length = 0;
1224 res_buf->start_trace.code = 0;
1226 trace_fjes_hw_start_debug_req(req_buf);
1227 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
1228 trace_fjes_hw_start_debug(res_buf);
1230 if (res_buf->start_trace.length !=
1231 FJES_DEV_COMMAND_START_DBG_RES_LEN) {
1232 result = -ENOMSG;
1233 trace_fjes_hw_start_debug_err("Invalid res_buf");
1234 } else if (ret == FJES_CMD_STATUS_NORMAL) {
1235 switch (res_buf->start_trace.code) {
1236 case FJES_CMD_REQ_RES_CODE_NORMAL:
1237 result = 0;
1238 break;
1239 default:
1240 result = -EPERM;
1241 break;
1243 } else {
1244 switch (ret) {
1245 case FJES_CMD_STATUS_UNKNOWN:
1246 result = -EPERM;
1247 break;
1248 case FJES_CMD_STATUS_TIMEOUT:
1249 trace_fjes_hw_start_debug_err("Busy Timeout");
1250 result = -EBUSY;
1251 break;
1252 case FJES_CMD_STATUS_ERROR_PARAM:
1253 case FJES_CMD_STATUS_ERROR_STATUS:
1254 default:
1255 result = -EPERM;
1256 break;
1260 return result;
1263 int fjes_hw_stop_debug(struct fjes_hw *hw)
1265 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
1266 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
1267 enum fjes_dev_command_response_e ret;
1268 int result = 0;
1270 if (!hw->hw_info.trace)
1271 return -EPERM;
1273 memset(req_buf, 0, hw->hw_info.req_buf_size);
1274 memset(res_buf, 0, hw->hw_info.res_buf_size);
1275 req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;
1277 res_buf->stop_trace.length = 0;
1278 res_buf->stop_trace.code = 0;
1280 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
1281 trace_fjes_hw_stop_debug(res_buf);
1283 if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
1284 trace_fjes_hw_stop_debug_err("Invalid res_buf");
1285 result = -ENOMSG;
1286 } else if (ret == FJES_CMD_STATUS_NORMAL) {
1287 switch (res_buf->stop_trace.code) {
1288 case FJES_CMD_REQ_RES_CODE_NORMAL:
1289 result = 0;
1290 hw->debug_mode = 0;
1291 break;
1292 default:
1293 result = -EPERM;
1294 break;
1296 } else {
1297 switch (ret) {
1298 case FJES_CMD_STATUS_UNKNOWN:
1299 result = -EPERM;
1300 break;
1301 case FJES_CMD_STATUS_TIMEOUT:
1302 result = -EBUSY;
1303 trace_fjes_hw_stop_debug_err("Busy Timeout");
1304 break;
1305 case FJES_CMD_STATUS_ERROR_PARAM:
1306 case FJES_CMD_STATUS_ERROR_STATUS:
1307 default:
1308 result = -EPERM;
1309 break;
1313 return result;