/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#ifndef _IBMVETH_H
#define _IBMVETH_H

/* constants for H_MULTICAST_CTRL */
#define IbmVethMcastReceptionModifyBit     0x80000UL
#define IbmVethMcastReceptionEnableBit     0x20000UL
#define IbmVethMcastFilterModifyBit        0x40000UL
#define IbmVethMcastFilterEnableBit        0x10000UL

#define IbmVethMcastEnableRecv       (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv      (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering  (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
#define IbmVethMcastAddFilter        0x1UL
#define IbmVethMcastRemoveFilter     0x2UL
#define IbmVethMcastClearFilterTable 0x3UL

#define IBMVETH_ILLAN_LRG_SR_ENABLED	0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT	0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM	0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM	0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK	0x0000000000000001UL

/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
	plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

#define h_free_logical_lan(ua) \
	plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

#define h_add_logical_lan_buffer(ua, buf) \
	plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
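
/*
 * Wrapper for H_SEND_LOGICAL_LAN.  The frame is described by up to six
 * buffer descriptors.  When the firmware advertises large-send support,
 * the MSS is passed as an additional hcall argument; otherwise it is
 * omitted.  The correlator returned by the hypervisor is handed back
 * through *corellator_out.
 */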
static inline long h_send_logical_lan(unsigned long unit_address,
		unsigned long desc1, unsigned long desc2, unsigned long desc3,
		unsigned long desc4, unsigned long desc5, unsigned long desc6,
		unsigned long corellator_in, unsigned long *corellator_out,
		unsigned long mss, unsigned long large_send_support)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (large_send_support)
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc1, desc2, desc3, desc4, desc5, desc6,
				  corellator_in, mss);
	else
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc1, desc2, desc3, desc4, desc5, desc6,
				  corellator_in);

	*corellator_out = retbuf[0];

	return rc;
}

static inline long h_illan_attributes(unsigned long unit_address,
				      unsigned long reset_mask, unsigned long set_mask,
				      unsigned long *ret_attributes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
			 reset_mask, set_mask);

	*ret_attributes = retbuf[0];

	return rc;
}
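
/*
 * Illustrative sketch, not part of the original header: enabling IPv4 TCP
 * checksum offload by setting the corresponding ILLAN attribute bit.  The
 * helper name is hypothetical; the real driver issues the same hcall from
 * its checksum-offload configuration path.
 */
static inline long ibmveth_example_enable_ipv4_csum(unsigned long unit_address,
						    unsigned long *ret_attr)
{
	/* a reset_mask of 0 leaves all other attribute bits untouched */
	return h_illan_attributes(unit_address, 0,
				  IBMVETH_ILLAN_IPV4_TCP_CSUM, ret_attr);
}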

#define h_multicast_ctrl(ua, cmd, mac) \
	plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

#define h_change_logical_lan_mac(ua, mac) \
	plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
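
/*
 * Illustrative sketch, not part of the original header: the IbmVethMcast*
 * command bits defined at the top of this file are ORed together and passed
 * to H_MULTICAST_CTRL through the wrapper above.  The helper name is
 * hypothetical; the MAC argument is unused for these table-wide commands,
 * so 0 is passed.
 */
static inline long ibmveth_example_enable_mcast(unsigned long unit_address)
{
	return h_multicast_ctrl(unit_address,
				IbmVethMcastEnableRecv |
				IbmVethMcastDisableFiltering |
				IbmVethMcastClearFilterTable,
				0);
}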

#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)

static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
static int pool_active[] = { 1, 1, 0, 0, 1 };
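
/*
 * The arrays above are indexed per receive buffer pool: pool i provides
 * pool_count[i] buffers of pool_size[i] bytes, and pool_active[i] says
 * whether the pool is enabled by default.  pool_count_cmo[] gives the
 * counts assumed when sizing I/O entitlement for Cooperative Memory
 * Overcommit (CMO) configurations.
 */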

#define IBM_VETH_INVALID_MAP ((u16)0xffff)

struct ibmveth_buff_pool {
	u32 size;
	u32 index;
	u32 buff_size;
	u32 threshold;
	atomic_t available;
	u32 consumer_index;
	u32 producer_index;
	u16 *free_map;
	dma_addr_t *dma_addr;
	struct sk_buff **skbuff;
	int active;
	struct kobject kobj;
};

struct ibmveth_rx_q {
	u64 index;
	u64 num_slots;
	u64 toggle;
	dma_addr_t queue_dma;
	u32 queue_len;
	struct ibmveth_rx_q_entry *queue_addr;
};

struct ibmveth_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct napi_struct napi;
	unsigned int mcastFilterSize;
	void *buffer_list_addr;
	void *filter_list_addr;
	dma_addr_t buffer_list_dma;
	dma_addr_t filter_list_dma;
	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
	struct ibmveth_rx_q rx_queue;
	int pool_config;
	int rx_csum;
	int large_send;
	bool is_active_trunk;
	void *bounce_buffer;
	dma_addr_t bounce_buffer_dma;

	u64 fw_ipv6_csum_support;
	u64 fw_ipv4_csum_support;
	u64 fw_large_send_support;
	/* adapter specific stats */
	u64 replenish_task_cycles;
	u64 replenish_no_mem;
	u64 replenish_add_buff_failure;
	u64 replenish_add_buff_success;
	u64 rx_invalid_buffer;
	u64 rx_no_buffer;
	u64 tx_map_failed;
	u64 tx_send_failed;
	u64 tx_large_packets;
	u64 rx_large_packets;
};

/*
 * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
 * so we don't need to byteswap the two elements.  However, since we use
 * a union (ibmveth_buf_desc) to convert from the struct to a u64, we do
 * end up with an endian-specific ordering of the elements, and that
 * needs correcting.
 */
struct ibmveth_buf_desc_fields {
#ifdef __BIG_ENDIAN
	u32 flags_len;
	u32 address;
#else
	u32 address;
	u32 flags_len;
#endif
#define IBMVETH_BUF_VALID	0x80000000
#define IBMVETH_BUF_TOGGLE	0x40000000
#define IBMVETH_BUF_LRG_SND	0x04000000
#define IBMVETH_BUF_NO_CSUM	0x02000000
#define IBMVETH_BUF_CSUM_GOOD	0x01000000
#define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
};

union ibmveth_buf_desc {
	u64 desc;
	struct ibmveth_buf_desc_fields fields;
};
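
/*
 * Illustrative sketch, not part of the original header: building a buffer
 * descriptor for the hypervisor.  flags_len packs the valid bit together
 * with the 24-bit buffer length, address holds the (32-bit) DMA address,
 * and the union lets the whole descriptor be handed to an hcall as a
 * single u64.  The helper name is hypothetical.
 */
static inline u64 ibmveth_example_make_desc(u32 dma_addr, u32 len)
{
	union ibmveth_buf_desc desc;

	desc.fields.flags_len = IBMVETH_BUF_VALID | (len & IBMVETH_BUF_LEN_MASK);
	desc.fields.address = dma_addr;

	return desc.desc;
}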

struct ibmveth_rx_q_entry {
	__be32 flags_off;
#define IBMVETH_RXQ_TOGGLE		0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT	31
#define IBMVETH_RXQ_VALID		0x40000000
#define IBMVETH_RXQ_LRG_PKT		0x04000000
#define IBMVETH_RXQ_NO_CSUM		0x02000000
#define IBMVETH_RXQ_CSUM_GOOD		0x01000000
#define IBMVETH_RXQ_OFF_MASK		0x0000FFFF

	__be32 length;
	/* correlator is only used by the OS, no need to byte swap */
	u64 correlator;
};
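
/*
 * Illustrative sketch, not part of the original header: testing the receive
 * queue flag bits above.  flags_off is big-endian, so it is byteswapped
 * before masking; the low 16 bits (IBMVETH_RXQ_OFF_MASK) give the offset of
 * the packet data within the receive buffer.  The helper names are
 * hypothetical.
 */
static inline int ibmveth_example_rxq_entry_valid(const struct ibmveth_rx_q_entry *e)
{
	return (be32_to_cpu(e->flags_off) & IBMVETH_RXQ_VALID) != 0;
}

static inline u16 ibmveth_example_rxq_entry_offset(const struct ibmveth_rx_q_entry *e)
{
	return be32_to_cpu(e->flags_off) & IBMVETH_RXQ_OFF_MASK;
}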

#endif /* _IBMVETH_H */