netbsd-mini2440.git: sys/dev/pci/cxgb_l2t.h
/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

$FreeBSD: src/sys/dev/cxgb/cxgb_l2t.h,v 1.2 2007/08/17 05:57:03 kmacy Exp $

***************************************************************************/
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H

#ifdef __FreeBSD__
#include <dev/cxgb/ulp/toecore/toedev.h>
#endif
#include <sys/lock.h>

#ifdef __FreeBSD__
#if __FreeBSD_version > 700000
#include <sys/rwlock.h>
#else
#define rwlock mtx
#define rw_wlock(x) mtx_lock((x))
#define rw_wunlock(x) mtx_unlock((x))
#define rw_rlock(x) mtx_lock((x))
#define rw_runlock(x) mtx_unlock((x))
#define rw_init(x, str) mtx_init((x), (str), NULL, MTX_DEF)
#define rw_destroy(x) mtx_destroy((x))
#endif
#endif
#ifdef __NetBSD__
#define rwlock mtx
#define rw_wlock(x) mtx_lock((x))
#define rw_wunlock(x) mtx_unlock((x))
#define rw_rlock(x) mtx_lock((x))
#define rw_runlock(x) mtx_unlock((x))
#define rw_init(x, str) mtx_init((x), (str), NULL, MTX_DEF)
#define rw_destroy(x) mtx_destroy((x))
#endif

enum {
        L2T_STATE_VALID,        /* entry is up to date */
        L2T_STATE_STALE,        /* entry may be used but needs revalidation */
        L2T_STATE_RESOLVING,    /* entry needs address resolution */
        L2T_STATE_UNUSED        /* entry not in use */
};

/*
 * Each L2T entry plays multiple roles.  First of all, it keeps state for the
 * corresponding entry of the HW L2 table and maintains a queue of offload
 * packets awaiting address resolution.  Second, it is a node of a hash table
 * chain, where the nodes of the chain are linked together through their next
 * pointer.  Finally, each node is a bucket of a hash table, pointing to the
 * first element in its chain through its first pointer.
 */
struct l2t_entry {
        uint16_t state;                 /* entry state */
        uint16_t idx;                   /* entry index */
        uint32_t addr;                  /* dest IP address */
        int ifindex;                    /* neighbor's net_device's ifindex */
        uint16_t smt_idx;               /* SMT index */
        uint16_t vlan;                  /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
        struct rtentry *neigh;          /* associated neighbour */
        struct l2t_entry *first;        /* start of hash chain */
        struct l2t_entry *next;         /* next l2t_entry on chain */
        struct mbuf *arpq_head;         /* queue of packets awaiting resolution */
        struct mbuf *arpq_tail;
        struct mtx lock;
        volatile uint32_t refcnt;       /* entry reference count */
        uint8_t dmac[6];                /* neighbour's MAC address */
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
        struct timer_list update_timer;
#ifdef __FreeBSD__
        struct toedev *tdev;
#endif
#endif
#endif
};
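
/*
 * Usage sketch (illustration only, not part of the driver): as described in
 * the comment above, each entry is both a hash bucket head ("first") and a
 * chain node ("next"), so a lookup walks the chain of the bucket that the
 * destination hashes to.  The hash function name (arp_hash) and the locking
 * shown here are assumptions.
 *
 *      struct l2t_entry *e;
 *
 *      rw_rlock(&d->lock);
 *      for (e = d->l2tab[arp_hash(addr, ifindex, d)].first; e; e = e->next)
 *              if (e->addr == addr && e->ifindex == ifindex) {
 *                      l2t_hold(d, e);
 *                      break;
 *              }
 *      rw_runlock(&d->lock);
 */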

struct l2t_data {
        unsigned int nentries;          /* number of entries */
        struct l2t_entry *rover;        /* starting point for next allocation */
        volatile uint32_t nfree;        /* number of free entries */
        struct rwlock lock;
        struct l2t_entry l2tab[0];
};
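
/*
 * l2tab[0] is a zero-length array used as a flexible array member: the
 * entries live immediately after the l2t_data header in one contiguous
 * allocation.  A sketch of the allocation t3_init_l2t() is expected to
 * perform (illustration only; the malloc type and flags are assumptions):
 *
 *      struct l2t_data *d;
 *      size_t len = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
 *
 *      d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 *      if (d != NULL)
 *              d->nentries = l2t_capacity;
 */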

typedef void (*arp_failure_handler_func)(struct toedev *dev,
                                         struct mbuf *m);

/*
 * Callback stored in an mbuf to handle address resolution failure.
 */
struct l2t_mbuf_cb {
        arp_failure_handler_func arp_failure_handler;
};

/*
 * XXX
 */
#define L2T_MBUF_CB(skb) ((struct l2t_mbuf_cb *)(skb)->cb)

static __inline void set_arp_failure_handler(struct mbuf *m,
                                             arp_failure_handler_func hnd)
{
#if 0
        L2T_MBUF_CB(m)->arp_failure_handler = hnd;
#endif
        panic("implement me");
}
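
/*
 * Intended pattern (sketch, based on the corresponding Linux/FreeBSD TOE
 * code; the handler name arp_failure_discard is hypothetical): before a
 * packet is queued for address resolution, a per-mbuf callback is attached
 * so the L2T code knows what to do if resolution ultimately fails.
 *
 *      set_arp_failure_handler(m, arp_failure_discard);
 *      l2t_send(tdev, m, e);
 */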

/*
 * Getting to the L2 data from an offload device.
 */
#define L2DATA(dev) ((dev)->l2opt)

void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct toedev *dev, struct rtentry *ifp);
struct l2t_entry *t3_l2t_get(struct toedev *dev, struct rtentry *neigh,
                             unsigned int smt_idx);
int t3_l2t_send_slow(struct toedev *dev, struct mbuf *m,
                     struct l2t_entry *e);
void t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);
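
/*
 * Lifecycle sketch (illustration only): a TOE device would typically create
 * the L2 table once at attach time and free it at detach.  The capacity
 * value and the dev->l2opt assignment are assumptions based on the L2DATA()
 * macro above.
 *
 *      dev->l2opt = t3_init_l2t(2048);
 *      if (L2DATA(dev) == NULL)
 *              return ENOMEM;
 *      ...
 *      t3_free_l2t(L2DATA(dev));
 *      dev->l2opt = NULL;
 */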

#ifdef CONFIG_PROC_FS
int t3_l2t_proc_setup(struct proc_dir_entry *dir, struct l2t_data *d);
void t3_l2t_proc_free(struct proc_dir_entry *dir);
#else
#define l2t_proc_setup(dir, d) 0
#define l2t_proc_free(dir)
#endif

int cxgb_ofld_send(struct toedev *dev, struct mbuf *m);

/*
 * Transmit helper: packets for a VALID entry go straight to the offload
 * device; anything else takes the slow path, which handles revalidation
 * and queueing until address resolution completes.
 */
static inline int l2t_send(struct toedev *dev, struct mbuf *m,
                           struct l2t_entry *e)
{
        if (__predict_true(e->state == L2T_STATE_VALID))
                return cxgb_ofld_send(dev, m);
        return t3_l2t_send_slow(dev, m, e);
}

/*
 * Drop a reference; the entry is freed when the last reference goes away.
 */
static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
{
        if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
                t3_l2e_free(d, e);
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
        if (atomic_fetchadd_int(&e->refcnt, 1) == 1)    /* 0 -> 1 transition */
                atomic_add_int(&d->nfree, 1);
}
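
/*
 * Putting it together (sketch, not part of the driver): a connection takes
 * a reference on an L2T entry via t3_l2t_get(), uses l2t_send() for its
 * offloaded traffic, and drops the reference with l2t_release() when it is
 * torn down.  tdev, rt and smt_idx are assumed to come from the caller.
 *
 *      struct l2t_entry *e = t3_l2t_get(tdev, rt, smt_idx);
 *
 *      if (e == NULL)
 *              return ENOMEM;
 *      l2t_send(tdev, m, e);
 *      ...
 *      l2t_release(L2DATA(tdev), e);
 */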

#endif