// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 */
7 #include <linux/netdevice.h>
8 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
14 #include <linux/if_arp.h>
16 #include "prismcompat.h"
18 #include "islpci_mgt.h"
19 #include "isl_oid.h" /* additional types and defs for isl38xx fw */
20 #include "isl_ioctl.h"
22 #include <net/iw_handler.h>
24 /******************************************************************************
25 Global variable definition section
26 ******************************************************************************/
27 int pc_debug
= VERBOSE
;
28 module_param(pc_debug
, int, 0);
30 /******************************************************************************
31 Driver general functions
32 ******************************************************************************/
33 #if VERBOSE > SHOW_ERROR_MESSAGES
35 display_buffer(char *buffer
, int length
)
37 if ((pc_debug
& SHOW_BUFFER_CONTENTS
) == 0)
41 printk("[%02x]", *buffer
& 255);
50 /*****************************************************************************
51 Queue handling for management frames
52 ******************************************************************************/
55 * Helper function to create a PIMFOR management frame header.
58 pimfor_encode_header(int operation
, u32 oid
, u32 length
, pimfor_header_t
*h
)
60 h
->version
= PIMFOR_VERSION
;
61 h
->operation
= operation
;
62 h
->device_id
= PIMFOR_DEV_ID_MHLI_MIB
;
64 h
->oid
= cpu_to_be32(oid
);
65 h
->length
= cpu_to_be32(length
);
69 * Helper function to analyze a PIMFOR management frame header.
71 static pimfor_header_t
*
72 pimfor_decode_header(void *data
, int len
)
74 pimfor_header_t
*h
= data
;
76 while ((void *) h
< data
+ len
) {
77 if (h
->flags
& PIMFOR_FLAG_LITTLE_ENDIAN
) {
78 le32_to_cpus(&h
->oid
);
79 le32_to_cpus(&h
->length
);
81 be32_to_cpus(&h
->oid
);
82 be32_to_cpus(&h
->length
);
84 if (h
->oid
!= OID_INL_TUNNEL
)
92 * Fill the receive queue for management frames with fresh buffers.
95 islpci_mgmt_rx_fill(struct net_device
*ndev
)
97 islpci_private
*priv
= netdev_priv(ndev
);
98 isl38xx_control_block
*cb
= /* volatile not needed */
99 (isl38xx_control_block
*) priv
->control_block
;
100 u32 curr
= le32_to_cpu(cb
->driver_curr_frag
[ISL38XX_CB_RX_MGMTQ
]);
102 #if VERBOSE > SHOW_ERROR_MESSAGES
103 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgmt_rx_fill\n");
106 while (curr
- priv
->index_mgmt_rx
< ISL38XX_CB_MGMT_QSIZE
) {
107 u32 index
= curr
% ISL38XX_CB_MGMT_QSIZE
;
108 struct islpci_membuf
*buf
= &priv
->mgmt_rx
[index
];
109 isl38xx_fragment
*frag
= &cb
->rx_data_mgmt
[index
];
111 if (buf
->mem
== NULL
) {
112 buf
->mem
= kmalloc(MGMT_FRAME_SIZE
, GFP_ATOMIC
);
115 buf
->size
= MGMT_FRAME_SIZE
;
117 if (buf
->pci_addr
== 0) {
118 buf
->pci_addr
= dma_map_single(&priv
->pdev
->dev
,
122 if (dma_mapping_error(&priv
->pdev
->dev
, buf
->pci_addr
)) {
124 "Failed to make memory DMA'able.\n");
129 /* be safe: always reset control block information */
130 frag
->size
= cpu_to_le16(MGMT_FRAME_SIZE
);
132 frag
->address
= cpu_to_le32(buf
->pci_addr
);
135 /* The fragment address in the control block must have
136 * been written before announcing the frame buffer to
139 cb
->driver_curr_frag
[ISL38XX_CB_RX_MGMTQ
] = cpu_to_le32(curr
);
145 * Create and transmit a management frame using "operation" and "oid",
146 * with arguments data/length.
147 * We either return an error and free the frame, or we return 0 and
148 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
152 islpci_mgt_transmit(struct net_device
*ndev
, int operation
, unsigned long oid
,
153 void *data
, int length
)
155 islpci_private
*priv
= netdev_priv(ndev
);
156 isl38xx_control_block
*cb
=
157 (isl38xx_control_block
*) priv
->control_block
;
161 isl38xx_fragment
*frag
;
162 struct islpci_membuf buf
;
165 int frag_len
= length
+ PIMFOR_HEADER_SIZE
;
167 #if VERBOSE > SHOW_ERROR_MESSAGES
168 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgt_transmit\n");
171 if (frag_len
> MGMT_FRAME_SIZE
) {
172 printk(KERN_DEBUG
"%s: mgmt frame too large %d\n",
173 ndev
->name
, frag_len
);
178 p
= buf
.mem
= kmalloc(frag_len
, GFP_KERNEL
);
184 /* create the header directly in the fragment data area */
185 pimfor_encode_header(operation
, oid
, length
, (pimfor_header_t
*) p
);
186 p
+= PIMFOR_HEADER_SIZE
;
189 memcpy(p
, data
, length
);
191 memset(p
, 0, length
);
193 #if VERBOSE > SHOW_ERROR_MESSAGES
195 pimfor_header_t
*h
= buf
.mem
;
196 DEBUG(SHOW_PIMFOR_FRAMES
,
197 "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
198 h
->operation
, oid
, h
->device_id
, h
->flags
, length
);
200 /* display the buffer contents for debugging */
201 display_buffer((char *) h
, sizeof (pimfor_header_t
));
202 display_buffer(p
, length
);
207 buf
.pci_addr
= dma_map_single(&priv
->pdev
->dev
, buf
.mem
, frag_len
,
209 if (dma_mapping_error(&priv
->pdev
->dev
, buf
.pci_addr
)) {
210 printk(KERN_WARNING
"%s: cannot map PCI memory for mgmt\n",
215 /* Protect the control block modifications against interrupts. */
216 spin_lock_irqsave(&priv
->slock
, flags
);
217 curr_frag
= le32_to_cpu(cb
->driver_curr_frag
[ISL38XX_CB_TX_MGMTQ
]);
218 if (curr_frag
- priv
->index_mgmt_tx
>= ISL38XX_CB_MGMT_QSIZE
) {
219 printk(KERN_WARNING
"%s: mgmt tx queue is still full\n",
224 /* commit the frame to the tx device queue */
225 index
= curr_frag
% ISL38XX_CB_MGMT_QSIZE
;
226 priv
->mgmt_tx
[index
] = buf
;
227 frag
= &cb
->tx_data_mgmt
[index
];
228 frag
->size
= cpu_to_le16(frag_len
);
229 frag
->flags
= 0; /* for any other than the last fragment, set to 1 */
230 frag
->address
= cpu_to_le32(buf
.pci_addr
);
232 /* The fragment address in the control block must have
233 * been written before announcing the frame buffer to
236 cb
->driver_curr_frag
[ISL38XX_CB_TX_MGMTQ
] = cpu_to_le32(curr_frag
+ 1);
237 spin_unlock_irqrestore(&priv
->slock
, flags
);
239 /* trigger the device */
240 islpci_trigger(priv
);
244 spin_unlock_irqrestore(&priv
->slock
, flags
);
252 * Receive a management frame from the device.
253 * This can be an arbitrary number of traps, and at most one response
254 * frame for a previous request sent via islpci_mgt_transmit().
257 islpci_mgt_receive(struct net_device
*ndev
)
259 islpci_private
*priv
= netdev_priv(ndev
);
260 isl38xx_control_block
*cb
=
261 (isl38xx_control_block
*) priv
->control_block
;
264 #if VERBOSE > SHOW_ERROR_MESSAGES
265 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgt_receive\n");
268 /* Only once per interrupt, determine fragment range to
269 * process. This avoids an endless loop (i.e. lockup) if
270 * frames come in faster than we can process them. */
271 curr_frag
= le32_to_cpu(cb
->device_curr_frag
[ISL38XX_CB_RX_MGMTQ
]);
274 for (; priv
->index_mgmt_rx
< curr_frag
; priv
->index_mgmt_rx
++) {
275 pimfor_header_t
*header
;
276 u32 index
= priv
->index_mgmt_rx
% ISL38XX_CB_MGMT_QSIZE
;
277 struct islpci_membuf
*buf
= &priv
->mgmt_rx
[index
];
280 struct islpci_mgmtframe
*frame
;
282 /* I have no idea (and no documentation) if flags != 0
283 * is possible. Drop the frame, reuse the buffer. */
284 if (le16_to_cpu(cb
->rx_data_mgmt
[index
].flags
) != 0) {
285 printk(KERN_WARNING
"%s: unknown flags 0x%04x\n",
287 le16_to_cpu(cb
->rx_data_mgmt
[index
].flags
));
291 /* The device only returns the size of the header(s) here. */
292 frag_len
= le16_to_cpu(cb
->rx_data_mgmt
[index
].size
);
295 * We appear to have no way to tell the device the
296 * size of a receive buffer. Thus, if this check
297 * triggers, we likely have kernel heap corruption. */
298 if (frag_len
> MGMT_FRAME_SIZE
) {
300 "%s: Bogus packet size of %d (%#x).\n",
301 ndev
->name
, frag_len
, frag_len
);
302 frag_len
= MGMT_FRAME_SIZE
;
305 /* Ensure the results of device DMA are visible to the CPU. */
306 dma_sync_single_for_cpu(&priv
->pdev
->dev
, buf
->pci_addr
,
307 buf
->size
, DMA_FROM_DEVICE
);
309 /* Perform endianess conversion for PIMFOR header in-place. */
310 header
= pimfor_decode_header(buf
->mem
, frag_len
);
312 printk(KERN_WARNING
"%s: no PIMFOR header found\n",
317 /* The device ID from the PIMFOR packet received from
318 * the MVC is always 0. We forward a sensible device_id.
319 * Not that anyone upstream would care... */
320 header
->device_id
= priv
->ndev
->ifindex
;
322 #if VERBOSE > SHOW_ERROR_MESSAGES
323 DEBUG(SHOW_PIMFOR_FRAMES
,
324 "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
325 header
->operation
, header
->oid
, header
->device_id
,
326 header
->flags
, header
->length
);
328 /* display the buffer contents for debugging */
329 display_buffer((char *) header
, PIMFOR_HEADER_SIZE
);
330 display_buffer((char *) header
+ PIMFOR_HEADER_SIZE
,
334 /* nobody sends these */
335 if (header
->flags
& PIMFOR_FLAG_APPLIC_ORIGIN
) {
337 "%s: errant PIMFOR application frame\n",
342 /* Determine frame size, skipping OID_INL_TUNNEL headers. */
343 size
= PIMFOR_HEADER_SIZE
+ header
->length
;
344 frame
= kmalloc(sizeof(struct islpci_mgmtframe
) + size
,
350 memcpy(&frame
->buf
, header
, size
);
351 frame
->header
= (pimfor_header_t
*) frame
->buf
;
352 frame
->data
= frame
->buf
+ PIMFOR_HEADER_SIZE
;
354 #if VERBOSE > SHOW_ERROR_MESSAGES
355 DEBUG(SHOW_PIMFOR_FRAMES
,
356 "frame: header: %p, data: %p, size: %d\n",
357 frame
->header
, frame
->data
, size
);
360 if (header
->operation
== PIMFOR_OP_TRAP
) {
361 #if VERBOSE > SHOW_ERROR_MESSAGES
363 "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
364 header
->oid
, header
->device_id
, header
->flags
,
368 /* Create work to handle trap out of interrupt
370 INIT_WORK(&frame
->ws
, prism54_process_trap
);
371 schedule_work(&frame
->ws
);
374 /* Signal the one waiting process that a response
375 * has been received. */
376 if ((frame
= xchg(&priv
->mgmt_received
, frame
)) != NULL
) {
378 "%s: mgmt response not collected\n",
382 #if VERBOSE > SHOW_ERROR_MESSAGES
383 DEBUG(SHOW_TRACING
, "Wake up Mgmt Queue\n");
385 wake_up(&priv
->mgmt_wqueue
);
394 * Cleanup the transmit queue by freeing all frames handled by the device.
397 islpci_mgt_cleanup_transmit(struct net_device
*ndev
)
399 islpci_private
*priv
= netdev_priv(ndev
);
400 isl38xx_control_block
*cb
= /* volatile not needed */
401 (isl38xx_control_block
*) priv
->control_block
;
404 #if VERBOSE > SHOW_ERROR_MESSAGES
405 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgt_cleanup_transmit\n");
408 /* Only once per cleanup, determine fragment range to
409 * process. This avoids an endless loop (i.e. lockup) if
410 * the device became confused, incrementing device_curr_frag
412 curr_frag
= le32_to_cpu(cb
->device_curr_frag
[ISL38XX_CB_TX_MGMTQ
]);
415 for (; priv
->index_mgmt_tx
< curr_frag
; priv
->index_mgmt_tx
++) {
416 int index
= priv
->index_mgmt_tx
% ISL38XX_CB_MGMT_QSIZE
;
417 struct islpci_membuf
*buf
= &priv
->mgmt_tx
[index
];
418 dma_unmap_single(&priv
->pdev
->dev
, buf
->pci_addr
, buf
->size
,
428 * Perform one request-response transaction to the device.
431 islpci_mgt_transaction(struct net_device
*ndev
,
432 int operation
, unsigned long oid
,
433 void *senddata
, int sendlen
,
434 struct islpci_mgmtframe
**recvframe
)
436 islpci_private
*priv
= netdev_priv(ndev
);
437 const long wait_cycle_jiffies
= msecs_to_jiffies(ISL38XX_WAIT_CYCLE
* 10);
438 long timeout_left
= ISL38XX_MAX_WAIT_CYCLES
* wait_cycle_jiffies
;
444 if (mutex_lock_interruptible(&priv
->mgmt_lock
))
447 prepare_to_wait(&priv
->mgmt_wqueue
, &wait
, TASK_UNINTERRUPTIBLE
);
448 err
= islpci_mgt_transmit(ndev
, operation
, oid
, senddata
, sendlen
);
453 while (timeout_left
> 0) {
455 struct islpci_mgmtframe
*frame
;
457 timeleft
= schedule_timeout_uninterruptible(wait_cycle_jiffies
);
458 frame
= xchg(&priv
->mgmt_received
, NULL
);
460 if (frame
->header
->oid
== oid
) {
466 "%s: expecting oid 0x%x, received 0x%x.\n",
467 ndev
->name
, (unsigned int) oid
,
475 "%s: timeout waiting for mgmt response %lu, "
476 "triggering device\n",
477 ndev
->name
, timeout_left
);
478 islpci_trigger(priv
);
480 timeout_left
+= timeleft
- wait_cycle_jiffies
;
482 printk(KERN_WARNING
"%s: timeout waiting for mgmt response\n",
485 /* TODO: we should reset the device here */
487 finish_wait(&priv
->mgmt_wqueue
, &wait
);
488 mutex_unlock(&priv
->mgmt_lock
);