/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_offload.c,v 1.5 2007/12/15 00:39:29 perry Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_offload.c,v 1.8 2007/08/17 05:57:04 kmacy Exp $");
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#endif
#include <sys/conf.h>
#include <machine/bus.h>
#ifdef __FreeBSD__
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#endif
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#ifdef __FreeBSD__
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/socket.h>
#include <sys/sockio.h>
#ifdef __FreeBSD__
#include <sys/smp.h>
#endif
#include <sys/sysctl.h>
#include <sys/queue.h>
#ifdef __FreeBSD__
#include <sys/taskqueue.h>
#endif

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include "cxgb_include.h"
#endif
#endif

#ifdef __FreeBSD__
#include <net/if_vlan_var.h>
#endif
#include <net/route.h>
/*
 * XXX
 */
#define LOG_NOTICE 2
#define BUG_ON(...)
#define VALIDATE_TID 0

TAILQ_HEAD(, cxgb_client) client_list;
TAILQ_HEAD(, toedev) ofld_dev_list;
TAILQ_HEAD(, adapter) adapter_list;

static struct mtx cxgb_db_lock;
static struct rwlock adapter_list_lock;

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
static int inited = 0;
static inline int
offload_activated(struct toedev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
}
/**
 *	cxgb_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list and call back the client for each
 *	activated offload device.
 */
void
cxgb_register_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_INSERT_TAIL(&client_list, client, client_entry);

	if (client->add) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 *	cxgb_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list and call back the client for
 *	each activated offload device.
 */
void
cxgb_unregister_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&client_list, client, client_entry);

	if (client->remove) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 *	cxgb_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is activated.
 */
void
cxgb_add_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->add)
			client->add(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 *	cxgb_remove_clients - deactivate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is deactivated.
 */
void
cxgb_remove_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->remove)
			client->remove(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}
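
/*
 * Return non-zero if the given interface belongs to a port of one of the
 * offload-capable adapters on the global adapter list.
 */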
static int
is_offloading(struct ifnet *ifp)
{
	struct adapter *adapter;
	int port;

	rw_rlock(&adapter_list_lock);
	TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) {
		for_each_port(adapter, port) {
			if (ifp == adapter->port[port].ifp) {
				rw_runlock(&adapter_list_lock);
				return 1;
			}
		}
	}
	rw_runlock(&adapter_list_lock);
	return 0;
}
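
/*
 * Map a MAC address (and optional VLAN tag) back to the owning interface.
 * The lookup itself is still under "#ifdef notyet", so this currently
 * always returns NULL.
 */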
static struct ifnet *
get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan)
{
#ifdef notyet
	int i;

	for_each_port(adapter, i) {
		const struct vlan_group *grp;
		const struct port_info *p = &adapter->port[i];
		struct ifnet *ifnet = p->ifp;

		if (!memcmp(p->hw_addr, mac, ETHER_ADDR_LEN)) {
			if (vlan && vlan != EVL_VLID_MASK) {
				grp = p->vlan_grp;
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
#endif
	return NULL;
}
static inline void
failover_fixup(adapter_t *adapter, int port)
{
	if (adapter->params.rev == 0) {
		struct ifnet *ifp = adapter->port[port].ifp;
		struct cmac *mac = &adapter->port[port].mac;

		if (!(ifp->if_flags & IFF_UP)) {
			/* Failover triggered by the interface ifdown */
			t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
			    F_TXEN);
			t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
		} else {
			/* Failover triggered by the interface link down */
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
			t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
			    F_RXEN);
		}
	}
}
static int
cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
		    t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/* on rx, the iscsi pdu has to be < rx page size and the
		   whole pdu + cpl headers has to fit into one sge buffer */
		uiip->max_rxsz =
		    (unsigned int)min(adapter->params.tp.rx_pg_size,
			(adapter->sge.qs[0].fl[1].buf_size -
			    sizeof(struct cpl_rx_data) * 2 -
			    sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = (EOPNOTSUPP);
	}
	return ret;
}
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int
cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *req2 = data;

#ifdef __FreeBSD__
		req2->udbell_physbase = rman_get_start(adapter->regs_res);
		req2->udbell_len = rman_get_size(adapter->regs_res);
#endif
		req2->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req2->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req2->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req2->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req2->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req2->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
#ifdef __FreeBSD__
		req2->kdb_addr = (void *)(rman_get_start(adapter->regs_res) + A_SG_KDOORBELL);
#endif
		break;
	}
	case RDMA_CQ_OP: {
		struct rdma_cq_op *req2 = data;

		/* may be called in any context */
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_cqcntxt_op(adapter, req2->id, req2->op,
		    req2->credits);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return (EINVAL);

		ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
		if (ret)
			return (ret);
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *req2 = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req2->id, req2->base_addr,
		    req2->size, ASYNC_NOTIF_RSPQ,
		    req2->ovfl_mode, req2->credits,
		    req2->credit_thres);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *req2 = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
		    SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
		    req2->base_addr, req2->size,
		    FW_RI_TID_START, 1, 0);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = EOPNOTSUPP;
	}
	return (ret);
}
static int
cxgb_offload_ctl(struct toedev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int port;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20; /* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
		    iffmacp->vlan_tag & EVL_VLID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, port)
			ports->lldevs[port] = adapter->port[port].ifp;
		break;
	case FAILOVER:
		port = *(int *)data;
		t3_port_failover(adapter, port);
		failover_fixup(adapter, port);
		break;
	case FAILOVER_DONE:
		port = *(int *)data;
		t3_failover_done(adapter, port);
		break;
	case FAILOVER_CLEAR:
		t3_failover_clear(adapter);
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return (EOPNOTSUPP);
	}
	return 0;
}
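
/*
 * Illustrative sketch (not part of the driver): an upper-layer module that
 * holds a struct toedev reaches the switch above through the ctl hook, e.g.
 *
 *	struct tid_range tids;
 *
 *	if (dev->ctl(dev, GET_TID_RANGE, &tids) == 0)
 *		printf("TIDs %u..%u\n", tids.base, tids.base + tids.num - 1);
 *
 * cxgb_offload_activate() below queries the same interface this way.
 */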
/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup. This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int
rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n",
	    n, *mtod(m[0], uint32_t *));
	while (n--)
		m_freem(m[n]);
	return 0;
}

static void
dummy_neigh_update(struct toedev *dev, struct rtentry *neigh)
{
}

void
cxgb_set_dummy_ops(struct toedev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}
/*
 * Free an active-open TID.
 */
void *
cxgb_free_atid(struct toedev *tdev, int atid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->toe_tid.ctx;

	mtx_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	mtx_unlock(&t->atid_lock);

	return ctx;
}
/*
 * Free a server TID and return it to the free pool.
 */
void
cxgb_free_stid(struct toedev *tdev, int stid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	mtx_lock(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	mtx_unlock(&t->stid_lock);
}
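
/*
 * Record an established connection in the TID table, binding the hardware
 * TID to the owning client and its per-connection context.
 */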
void
cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client,
    void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_add_int(&t->tids_in_use, 1);
}
/*
 * Populate a TID_RELEASE WR. The mbuf must be already properly sized.
 */
static inline void
mk_tid_release(struct mbuf *m, unsigned int tid)
{
	struct cpl_tid_release *req;

	m_set_priority(m, CPL_PRIORITY_SETUP);
	req = mtod(m, struct cpl_tid_release *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
static void
#ifdef __FreeBSD__
t3_process_tid_release_list(void *data, int pending)
#endif
#ifdef __NetBSD__
t3_process_tid_release_list(struct work *wk, void *data)
#endif
{
	struct mbuf *m;
	struct toedev *tdev = data;
	struct toe_data *td = TOE_DATA(tdev);

	mtx_lock(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct toe_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct toe_tid_entry *)p->ctx;
		mtx_unlock(&td->tid_release_lock);
		m = m_get(M_WAIT, MT_DATA);
		mk_tid_release(m, p - td->tid_maps.tid_tab);
		cxgb_ofld_send(tdev, m);
		p->ctx = NULL;
		mtx_lock(&td->tid_release_lock);
	}
	mtx_unlock(&td->tid_release_lock);
}
/* use ctx as a next pointer in the tid release list */
void
cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid)
{
	struct toe_data *td = TOE_DATA(tdev);
	struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid];

	mtx_lock(&td->tid_release_lock);
	p->ctx = td->tid_release_list;
	td->tid_release_list = p;

#ifdef __FreeBSD__
	if (!p->ctx)
		taskqueue_enqueue(tdev->adapter->tq, &td->tid_release_task);
#endif
#ifdef __NetBSD__
	if (!p->ctx)
		workqueue_enqueue(td->tid_release_task.wq, &td->tid_release_task.w, NULL);
#endif

	mtx_unlock(&td->tid_release_lock);
}
/*
 * Remove a tid from the TID table. A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void
cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		atomic_cmpset_ptr((uintptr_t *)&t->tid_tab[tid].ctx, (long)NULL, (long)ctx);
	else {
		struct mbuf *m;

		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_true(m != NULL)) {
			mk_tid_release(m, tid);
			cxgb_ofld_send(tdev, m);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb_queue_tid_release(tdev, tid);
	}
	atomic_add_int(&t->tids_in_use, -1);
}
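
/*
 * Allocate a free active-open TID and bind it to the given client and
 * context.  Returns the new atid, or -1 if none are available.
 */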
int
cxgb_alloc_atid(struct toedev *tdev, struct cxgb_client *client,
    void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->atids_in_use++;
	}
	mtx_unlock(&t->atid_lock);
	return atid;
}
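
/*
 * Allocate a free server (listening) TID and bind it to the given client
 * and context.  Returns the new stid, or -1 if none are available.
 */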
int
cxgb_alloc_stid(struct toedev *tdev, struct cxgb_client *client,
    void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->stids_in_use++;
	}
	mtx_unlock(&t->stid_lock);
	return stid;
}
static int
do_smt_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int
do_l2t_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
static int
do_act_open_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_open_rpl *rpl = cplhdr(m);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m,
		    toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_stid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode] (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_hwtid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid;
	struct toe_tid_entry *toe_tid;

	/* XXX debug stub: the dispatch below is currently short-circuited */
	printf("do_hwtid_rpl m=%p\n", m);
	return (0);

	hwtid = G_TID(ntohl(p->opcode_tid));

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_cr(struct toedev *dev, struct mbuf *m)
{
	struct cpl_pass_accept_req *req = cplhdr(m);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_abort_req_rss(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(m);
		struct cpl_abort_rpl *rpl;

		struct mbuf *m2 = m_get(M_NOWAIT, MT_DATA);
		if (!m2) {
			log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
			goto out;
		}

		m_set_priority(m2, CPL_PRIORITY_DATA);
#if 0
		__skb_put(skb, sizeof(struct cpl_abort_rpl));
#endif
		rpl = cplhdr(m2);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
		OPCODE_TID(rpl) =
		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
		rpl->cmd = req->status;
		cxgb_ofld_send(dev, m2);
out:
		return CPL_RET_BUF_DONE;
	}
}
static int
do_act_establish(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_establish *req = cplhdr(m);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return toe_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_set_tcb_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SET_TCB_RPL status %u for tid %u\n",
		    rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}
static int
do_trace(struct toedev *dev, struct mbuf *m)
{
#if 0
	struct cpl_trace_pkt *p = cplhdr(m);

	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = mtod(m, (char *));
	netif_receive_skb(skb);
#endif
	return 0;
}
static int
do_term(struct toedev *dev, struct mbuf *m)
{
	unsigned int hwtid = ntohl(m_get_priority(m)) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[opcode]) {
		return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
	return (0);
}
#if defined(FOO)
#include <linux/config.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <net/arp.h>

static int (*orig_arp_constructor)(struct ifnet *);

static void
neigh_suspect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

static void
neigh_connect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static inline int
neigh_max_probes(const struct neighbour *n)
{
	const struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
	    p->ucast_probes :
	    p->ucast_probes + p->app_probes + p->mcast_probes);
}
static void
neigh_timer_handler_offload(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		log(LOG_WARNING, "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
		    neigh->confirmed + neigh->parms->reachable_time)) {
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
		    neigh->used + neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			cxgb_neigh_update(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
		    neigh->confirmed + neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			cxgb_neigh_update(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set_int(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}
	/*
	 * Needed for read of probes
	 */
	mb();
	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    neigh->probes >= neigh_max_probes(neigh)) {
		struct mbuf *m;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		cxgb_neigh_update(neigh);
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);

		/* It is very thin place. report_unreachable is very
		   complicated routine. Particularly, it can hit the same
		   neighbour entry!
		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		    (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct mbuf *m = skb_peek(&neigh->arp_queue);

		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_add_int(&neigh->probes, 1);
		if (m)
			m_free(m);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
static int
arp_constructor_offload(struct neighbour *neigh)
{
	if (neigh->ifp && is_offloading(neigh->ifp))
		neigh->timer.function = neigh_timer_handler_offload;
	return orig_arp_constructor(neigh);
}

/*
 * This must match exactly the signature of neigh_update for jprobes to work.
 * It runs from a trap handler with interrupts off so don't disable BH.
 */
static int
neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
    u8 new, u32 flags)
{
	write_lock(&neigh->lock);
	cxgb_neigh_update(neigh);
	write_unlock(&neigh->lock);
	jprobe_return();
	/* NOTREACHED */
	return 0;
}

static struct jprobe neigh_update_jprobe = {
	.entry = (kprobe_opcode_t *) neigh_update_offload,
	.kp.addr = (kprobe_opcode_t *) neigh_update
};
#ifdef MODULE_SUPPORT
static int
prepare_arp_with_t3core(void)
{
	int err;

	err = register_jprobe(&neigh_update_jprobe);
	if (err) {
		log(LOG_ERR, "Could not install neigh_update jprobe, "
		    "error %d\n", err);
		return err;
	}

	orig_arp_constructor = arp_tbl.constructor;
	arp_tbl.constructor = arp_constructor_offload;

	return 0;
}

static void
restore_arp_sans_t3core(void)
{
	arp_tbl.constructor = orig_arp_constructor;
	unregister_jprobe(&neigh_update_jprobe);
}

#else /* Module support */
static inline int
prepare_arp_with_t3core(void)
{
	return 0;
}

static inline void
restore_arp_sans_t3core(void)
{}
#endif
#endif
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int
do_bad_cpl(struct toedev *dev, struct mbuf *m)
{
	log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
	    *mtod(m, uint32_t *));
	return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
 * to unregister an existing handler.
 */
void
t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		log(LOG_ERR, "T3C: handler registration for "
		    "opcode %x failed\n", opcode);
}
/*
 * TOEDEV's receive method.
 */
int
process_rx(struct toedev *dev, struct mbuf **m, int n)
{
	while (n--) {
		struct mbuf *m0 = *m++;
		unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
		int ret = cpl_handlers[opcode] (dev, m0);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(m0);

			log(LOG_ERR, "%s: CPL message (opcode %u) had "
			    "unknown TID %u\n", dev->name, opcode,
			    G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			m_freem(m0);
	}
	return 0;
}
/*
 * Sends an mbuf to a T3C driver after dealing with any active network taps.
 */
int
cxgb_ofld_send(struct toedev *dev, struct mbuf *m)
{
	int r;

	critical_enter();
	r = dev->send(dev, m);
	critical_exit();
	return r;
}

/**
 *	cxgb_ofld_recv - process n received offload packets
 *	@dev: the offload device
 *	@m: an array of offload packets
 *	@n: the number of offload packets
 *
 *	Process an array of ingress offload packets. Each packet is forwarded
 *	to any active network taps and then passed to the offload device's receive
 *	method. We optimize passing packets to the receive method by passing
 *	it the whole array at once except when there are active taps.
 */
int
cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n)
{
#if defined(CONFIG_CHELSIO_T3)
	if (likely(!netdev_nit))
		return dev->recv(dev, skb, n);

	for ( ; n; n--, skb++) {
		skb[0]->dev = dev->lldev;
		dev_queue_xmit_nit(skb[0], dev->lldev);
		skb[0]->dev = NULL;
		dev->recv(dev, skb, 1);
	}
	return 0;
#else
	return dev->recv(dev, m, n);
#endif
}
void
cxgb_neigh_update(struct rtentry *rt)
{
	if (is_offloading(rt->rt_ifp)) {
		struct toedev *tdev = TOEDEV(rt->rt_ifp);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, rt);
	}
}
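
/*
 * Build and send a CPL_SET_TCB_FIELD work request that points a
 * connection's TCB at a new L2T entry.
 */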
static void
set_l2t_ix(struct toedev *tdev, u32 tid, struct l2t_entry *e)
{
	struct mbuf *m;
	struct cpl_set_tcb_field *req;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (!m) {
		log(LOG_ERR, "%s: cannot allocate mbuf!\n", __func__);
		return;
	}

	m_set_priority(m, CPL_PRIORITY_CONTROL);
	req = mtod(m, struct cpl_set_tcb_field *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = htobe64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = htobe64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, m);
}
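
/*
 * A route has been redirected: resolve an L2T entry for the new route and
 * offer every offloaded connection the chance to switch to it, updating the
 * TCB of each connection whose client asks for it.
 */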
void
cxgb_redirect(struct rtentry *old, struct rtentry *new)
{
	struct ifnet *olddev, *newdev;
	struct tid_info *ti;
	struct toedev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct toe_tid_entry *te;

	olddev = old->rt_ifp;
	newdev = new->rt_ifp;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		log(LOG_WARNING, "%s: Redirect to non-offload "
		    "device ignored.\n", __func__);
		return;
	}
	tdev = TOEDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != TOEDEV(newdev)) {
		log(LOG_WARNING, "%s: Redirect to different "
		    "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, ((struct port_info *)new->rt_ifp->if_softc)->port_id);
	if (!e) {
		log(LOG_ERR, "%s: couldn't allocate new l2t entry!\n",
		    __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(TOE_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new,
			    e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}
/*
 * Allocate a chunk of memory.  The allocated memory is cleared.
 */
void *
cxgb_alloc_mem(unsigned long size)
{
	return malloc(size, M_DEVBUF, M_ZERO);
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void
cxgb_free_mem(void *addr)
{
	free(addr, M_DEVBUF);
}
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int
init_tid_tabs(struct tid_info *t, unsigned int ntids,
    unsigned int natids, unsigned int nstids,
    unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return (ENOMEM);

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set_int(&t->tids_in_use, 0);
	mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
	mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
static void
free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void
add_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

static inline void
remove_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

/*
 * XXX
 */
#define t3_free_l2t(...)
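
/*
 * Bring up offload state for an adapter: allocate the toe_data, L2 table
 * and TID maps, install the receive and neighbour-update hooks, and add the
 * adapter to the global list.
 */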
int
cxgb_offload_activate(struct adapter *adapter)
{
	struct toedev *dev = &adapter->tdev;
	int natids, err;
	struct toe_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = malloc(sizeof(*t), M_DEVBUF, M_WAITOK);
	if (!t)
		return (ENOMEM);

	err = (EOPNOTSUPP);
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = (ENOMEM);
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
	    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

#ifdef __FreeBSD__
	TASK_INIT(&t->tid_release_task, 0 /* XXX? */, t3_process_tid_release_list, dev);
#endif
#ifdef __NetBSD__
	t->tid_release_task.name = "t3_process_tid_release_list";
	t->tid_release_task.func = t3_process_tid_release_list;
	t->tid_release_task.context = adapter;
	kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &t->tid_release_task, NULL, "cxgb_make_task");
#endif
	mtx_init(&t->tid_release_lock, "tid release", NULL, MTX_DEF);
	t->dev = dev;

	TOE_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;
#if 0
	offload_proc_dev_setup(dev);
#endif
	/* Register netevent handler once */
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		if (prepare_arp_with_t3core())
			log(LOG_ERR, "Unable to set offload capabilities\n");
#endif
	}

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;

out_free:
	free(t, M_DEVBUF);
	return err;
}
void
cxgb_offload_deactivate(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
	struct toe_data *t = TOE_DATA(tdev);

	remove_adapter(adapter);
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		restore_arp_sans_t3core();
#endif
	}
	free_tid_maps(&t->tid_maps);
	TOE_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	free(t, M_DEVBUF);
}
static inline void
register_tdev(struct toedev *tdev)
{
	static int unit;

	mtx_lock(&cxgb_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}

static inline void
unregister_tdev(struct toedev *tdev)
{
	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}
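
/*
 * Attach an adapter's toedev to the offload framework: install dummy
 * receive ops plus the control and transmit hooks, and register the device
 * under an "ofld_devN" name.
 */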
void
cxgb_adapter_ofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;

	cxgb_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ?
	    T3A : T3B;

	register_tdev(tdev);
#if 0
	offload_proc_dev_init(tdev);
#endif
}

void
cxgb_adapter_unofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
#if 0
	offload_proc_dev_cleanup(tdev);
	offload_proc_dev_exit(tdev);
#endif
	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
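
/*
 * One-time module initialization: set up the global locks and lists and
 * populate the CPL dispatch table with this file's default handlers.
 */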
void
cxgb_offload_init(void)
{
	int i;

	if (inited)
		return;
	else
		inited = 1;

	mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
#ifdef __FreeBSD__
	rw_init(&adapter_list_lock, "ofld adap list");
#endif
#ifdef __NetBSD__
	rw_init(&adapter_list_lock);
#endif
	TAILQ_INIT(&client_list);
	TAILQ_INIT(&ofld_dev_list);
	TAILQ_INIT(&adapter_list);

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
#if 0
	if (offload_proc_init())
		log(LOG_WARNING, "Unable to create /proc/net/cxgb3 dir\n");
#endif
}
void
cxgb_offload_exit(void)
{
	static int deinited = 0;

	if (deinited)
		return;

	deinited = 1;
	mtx_destroy(&cxgb_db_lock);
	rw_destroy(&adapter_list_lock);
#if 0
	offload_proc_cleanup();
#endif
}
#if 0
static int
offload_info_read_proc(char *buf, char **start, off_t offset,
    int length, int *eof, void *data)
{
	struct toe_data *d = data;
	struct tid_info *t = &d->tid_maps;
	int len;

	len = sprintf(buf, "TID range: 0..%d, in use: %u\n"
	    "STID range: %d..%d, in use: %u\n"
	    "ATID range: %d..%d, in use: %u\n"
	    "MSS: %u\n",
	    t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
	    t->stid_base + t->nstids - 1, t->stids_in_use,
	    t->atid_base, t->atid_base + t->natids - 1,
	    t->atids_in_use, d->tx_max_chunk);
	if (len > length)
		len = length;
	*eof = 1;
	return len;
}

static int
offload_info_proc_setup(struct proc_dir_entry *dir,
    struct toe_data *d)
{
	struct proc_dir_entry *p;

	if (!dir)
		return (EINVAL);

	p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
	if (!p)
		return (ENOMEM);

	p->owner = THIS_MODULE;
	return 0;
}

static int
offload_devices_read_proc(char *buf, char **start, off_t offset,
    int length, int *eof, void *data)
{
	int len;
	struct toedev *dev;
	struct net_device *ndev;

	len = sprintf(buf, "Device Interfaces\n");

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(dev, &ofld_dev_list, ofld_entry) {
		len += sprintf(buf + len, "%-16s", dev->name);
		read_lock(&dev_base_lock);
		for (ndev = dev_base; ndev; ndev = ndev->next) {
			if (TOEDEV(ndev) == dev)
				len += sprintf(buf + len, " %s", ndev->name);
		}
		read_unlock(&dev_base_lock);
		len += sprintf(buf + len, "\n");
		if (len >= length)
			break;
	}
	mtx_unlock(&cxgb_db_lock);

	if (len > length)
		len = length;
	*eof = 1;
	return len;
}
#endif