/*
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/system.h>
#include <linux/if_arp.h>

#include "prismcompat.h"
#include "islpci_mgt.h"
#include "isl_oid.h"		/* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"

#include <net/iw_handler.h>
/******************************************************************************
        Global variable definition section
******************************************************************************/
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);

/******************************************************************************
    Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
void
display_buffer(char *buffer, int length)
{
	if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
		return;

	/* dump the buffer as a sequence of hex bytes */
	while (length > 0) {
		printk("[%02x]", *buffer & 255);
		length--;
		buffer++;
	}

	printk("\n");
}
#endif
/*****************************************************************************
    Queue handling for management frames
******************************************************************************/
/*
 * Helper function to create a PIMFOR management frame header.
 */
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
{
	h->version = PIMFOR_VERSION;
	h->operation = operation;
	h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
	h->flags = 0;
	h->oid = cpu_to_be32(oid);
	h->length = cpu_to_be32(length);
}
/*
 * Helper function to analyze a PIMFOR management frame header.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
	pimfor_header_t *h = data;

	while ((void *) h < data + len) {
		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
			le32_to_cpus(&h->oid);
			le32_to_cpus(&h->length);
		} else {
			be32_to_cpus(&h->oid);
			be32_to_cpus(&h->length);
		}

		/* A tunnel OID wraps another PIMFOR header; skip it. */
		if (h->oid != OID_INL_TUNNEL)
			return h;
		h++;
	}
	return NULL;
}
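/*
 * Illustrative round trip (sketch only, not part of the driver): encode a
 * header into a scratch buffer and parse it back.  PIMFOR_OP_GET and
 * OID_INL_CONFIG are assumed to be available from the driver headers; any
 * non-tunnel OID would do.
 *
 *	char scratch[PIMFOR_HEADER_SIZE];
 *	pimfor_header_t *h;
 *
 *	pimfor_encode_header(PIMFOR_OP_GET, OID_INL_CONFIG, 0,
 *			     (pimfor_header_t *) scratch);
 *	h = pimfor_decode_header(scratch, sizeof (scratch));
 *	// h now points at the header with oid/length in host byte order
 */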
/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem) {
				printk(KERN_WARNING
				       "Error allocating management frame.\n");
				return -ENOMEM;
			}
			buf->size = MGMT_FRAME_SIZE;
		}
		if (buf->pci_addr == 0) {
			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
						       MGMT_FRAME_SIZE,
						       PCI_DMA_FROMDEVICE);
			if (!buf->pci_addr) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}

		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;

		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to
		 * the device. */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}
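/*
 * Note on the queue bookkeeping used above and in the tx path below:
 * driver_curr_frag/device_curr_frag and index_mgmt_rx/index_mgmt_tx are
 * free-running u32 counters; only their difference and their value modulo
 * ISL38XX_CB_MGMT_QSIZE are ever used, so the arithmetic stays correct
 * across wraparound.  For example, if the queue size were 4, curr == 7 and
 * index_mgmt_rx == 5 would mean two buffers are still owned by the device,
 * and the fill loop would post two more (slots 7 % 4 == 3 and 8 % 4 == 0)
 * before stopping at a difference of 4.
 */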
/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
		    void *data, int length)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	void *p;
	int err = -EINVAL;
	unsigned long flags;
	isl38xx_fragment *frag;
	struct islpci_membuf buf;
	u32 curr_frag;
	int index;
	int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

	if (frag_len > MGMT_FRAME_SIZE) {
		printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
		       ndev->name, frag_len);
		goto error;
	}

	err = -ENOMEM;
	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
	if (!buf.mem) {
		printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
		       ndev->name);
		goto error;
	}
	buf.size = frag_len;

	/* create the header directly in the fragment data area */
	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
	p += PIMFOR_HEADER_SIZE;

	if (data)
		memcpy(p, data, length);
	else
		memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
	{
		pimfor_header_t *h = buf.mem;
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
		      h->operation, oid, h->device_id, h->flags, length);

		/* display the buffer contents for debugging */
		display_buffer((char *) h, sizeof (pimfor_header_t));
		display_buffer(p, length);
	}
#endif
	err = -ENOMEM;
	buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
				      PCI_DMA_TODEVICE);
	if (!buf.pci_addr) {
		printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
		       ndev->name);
		goto error_free;
	}

	/* Protect the control block modifications against interrupts. */
	spin_lock_irqsave(&priv->slock, flags);
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
		printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
		       ndev->name);
		goto error_unlock;
	}

	/* commit the frame to the tx device queue */
	index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
	priv->mgmt_tx[index] = buf;
	frag = &cb->tx_data_mgmt[index];
	frag->size = cpu_to_le16(frag_len);
	frag->flags = 0;	/* for any other than the last fragment, set to 1 */
	frag->address = cpu_to_le32(buf.pci_addr);

	/* The fragment address in the control block must have
	 * been written before announcing the frame buffer to
	 * the device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
	spin_unlock_irqrestore(&priv->slock, flags);

	/* trigger the device */
	islpci_trigger(priv);
	return 0;

      error_unlock:
	spin_unlock_irqrestore(&priv->slock, flags);
      error_free:
	kfree(buf.mem);
      error:
	return err;
}
/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif

	/* Only once per interrupt, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * frames come in faster than we can process them. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* I have no idea (and no documentation) if flags != 0
		 * is possible.  Drop the frame, reuse the buffer. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		/* The device only returns the size of the header(s) here. */
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		/*
		 * We appear to have no way to tell the device the
		 * size of a receive buffer.  Thus, if this check
		 * triggers, we likely have kernel heap corruption. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
			       "%s: Bogus packet size of %d (%#x).\n",
			       ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}
		/* Ensure the results of device DMA are visible to the CPU. */
		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
					    buf->size, PCI_DMA_FROMDEVICE);

		/* Perform endianess conversion for PIMFOR header in-place. */
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* The device ID from the PIMFOR packet received from
		 * the MVC is always 0.  We forward a sensible device_id.
		 * Not that anyone upstream would care... */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		/* display the buffer contents for debugging */
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* nobody sends these */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif
		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id, header->flags,
			       header->length);
#endif

			/* Create work to handle trap out of interrupt
			 * context. */
			INIT_WORK(&frame->ws, prism54_process_trap);
			schedule_work(&frame->ws);
		} else {
			/* Signal the one waiting process that a response
			 * has been received. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}
	}

	return 0;
}
/*
 * Cleanup the transmit queue by freeing all frames handled by the device.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif

	/* Only once per cleanup, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * the device became confused, incrementing device_curr_frag
	 * rapidly. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
		int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_tx[index];

		pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
				 PCI_DMA_TODEVICE);
		buf->pci_addr = 0;
		kfree(buf->mem);
		buf->mem = NULL;
		buf->size = 0;
	}
}
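/*
 * Buffer ownership differs between the two management queues above: the
 * receive buffers are allocated once and recycled in place by
 * islpci_mgmt_rx_fill(), whereas each transmitted frame gets its own
 * kmalloc'ed buffer in islpci_mgt_transmit(), recorded in priv->mgmt_tx[],
 * and is unmapped and freed by islpci_mgt_cleanup_transmit() only after the
 * device reports it as handled via device_curr_frag.
 */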
/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
		       int operation, unsigned long oid,
		       void *senddata, int sendlen,
		       struct islpci_mgmtframe **recvframe)
{
	islpci_private *priv = netdev_priv(ndev);
	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
	int err;
	DEFINE_WAIT(wait);

	*recvframe = NULL;

	if (down_interruptible(&priv->mgmt_sem))
		return -ERESTARTSYS;

	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
	if (err)
		goto out;

	err = -ETIMEDOUT;
	while (timeout_left > 0) {
		long timeleft;
		struct islpci_mgmtframe *frame;

		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
		frame = xchg(&priv->mgmt_received, NULL);
		if (frame) {
			if (frame->header->oid == oid) {
				*recvframe = frame;
				err = 0;
				goto out;
			}
			printk(KERN_DEBUG
			       "%s: expecting oid 0x%x, received 0x%x.\n",
			       ndev->name, (unsigned int) oid,
			       frame->header->oid);
			kfree(frame);
		}
		if (timeleft == 0) {
			printk(KERN_DEBUG
			       "%s: timeout waiting for mgmt response %lu, "
			       "triggering device\n",
			       ndev->name, timeout_left);
			islpci_trigger(priv);
		}
		timeout_left += timeleft - wait_cycle_jiffies;
	}
	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
	       ndev->name);

	/* TODO: we should reset the device here */
 out:
	finish_wait(&priv->mgmt_wqueue, &wait);
	up(&priv->mgmt_sem);
	return err;
}
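/*
 * Typical use (sketch only; the OID is an illustrative assumption, the real
 * callers live elsewhere in the driver): issue a GET and release the
 * response frame once its payload has been consumed.
 *
 *	struct islpci_mgmtframe *response = NULL;
 *	int err = islpci_mgt_transaction(ndev, PIMFOR_OP_GET, OID_INL_CONFIG,
 *					 NULL, 0, &response);
 *	if (!err) {
 *		// response->data points at the payload, response->header at
 *		// the PIMFOR header; the frame was kmalloc'ed in
 *		// islpci_mgt_receive() and must be freed by the caller.
 *		kfree(response);
 *	}
 */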