/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_hss"

#define PKT_EXTRA_FLAGS		0 /* orig 1 */
#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */

#define RX_DESCS		16 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT		1000 /* microseconds */
#define HSS_COUNT		2
#define FRAME_SIZE		256 /* doesn't matter at this point */
#define FRAME_OFFSET		0
#define MAX_CHANNELS		(FRAME_SIZE / 8)

#define NAPI_WEIGHT		16

/* Queue IDs */
#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE	15
#define HSS0_PKT_TX2_QUEUE	16
#define HSS0_PKT_TX3_QUEUE	17
#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE	19
#define HSS0_PKT_RXFREE2_QUEUE	20
#define HSS0_PKT_RXFREE3_QUEUE	21
#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE	10
#define HSS1_PKT_RX_QUEUE	0
#define HSS1_PKT_TX0_QUEUE	5
#define HSS1_PKT_TX1_QUEUE	6
#define HSS1_PKT_TX2_QUEUE	7
#define HSS1_PKT_TX3_QUEUE	8
#define HSS1_PKT_RXFREE0_QUEUE	1
#define HSS1_PKT_RXFREE1_QUEUE	2
#define HSS1_PKT_RXFREE2_QUEUE	3
#define HSS1_PKT_RXFREE3_QUEUE	4
#define HSS1_PKT_TXDONE_QUEUE	9

#define NPE_PKT_MODE_HDLC		0
#define NPE_PKT_MODE_RAW		1
#define NPE_PKT_MODE_56KMODE		2
#define NPE_PKT_MODE_56KENDIAN_MSB	4

/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES		0x1 /* default = flags */
#define PKT_HDLC_CRC_32			0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN		0x4 /* default = LE */

/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING		0x08000000
#define PCR_DCLK_EDGE_RISING		0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED		0x01000000

/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE		0x00200000

/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT	0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN			0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN		0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT			0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE		0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH		0x00002000
#define PCR_TX_V56K_HIGH_IMP		0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH		0x00000800
#define PCR_TX_UNASS_HIGH_IMP		0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP		0x00000400

/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA		0x00000100

/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC		0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000

/* default = no loopback */
#define CCR_LOOPBACK			0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS			0x01000000

/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /* 65 KHz */

#define CLK42X_SPEED_512KHZ	((  130 << 22) | (  2 << 12) |   15)
#define CLK42X_SPEED_1536KHZ	((   43 << 22) | ( 18 << 12) |   47)
#define CLK42X_SPEED_1544KHZ	((   43 << 22) | ( 33 << 12) |  192)
#define CLK42X_SPEED_2048KHZ	((   32 << 22) | ( 34 << 12) |   63)
#define CLK42X_SPEED_4096KHZ	((   16 << 22) | ( 34 << 12) |  127)
#define CLK42X_SPEED_8192KHZ	((    8 << 22) | ( 34 << 12) |  255)

#define CLK46X_SPEED_512KHZ	((  130 << 22) | ( 24 << 12) |  127)
#define CLK46X_SPEED_1536KHZ	((   43 << 22) | (152 << 12) |  383)
#define CLK46X_SPEED_1544KHZ	((   43 << 22) | ( 66 << 12) |  385)
#define CLK46X_SPEED_2048KHZ	((   32 << 22) | (280 << 12) |  511)
#define CLK46X_SPEED_4096KHZ	((   16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ	((    8 << 22) | (280 << 12) | 2047)

/*
 * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
 *     A (10 bits), B (10 bits) and C (12 bits).
 * IXP42x HSS clock generator operation (verified with an oscilloscope):
 * Each clock bit takes 7.5 ns (1 / 133.xx MHz).
 * The clock sequence consists of (C - B) states of 0s and 1s, each state is
 * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
 * (A + 1) bits wide.
 *
 * The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
 * freq = 66.666 MHz / (A + (B + 1) / (C + 1))
 * minimum freq = 66.666 MHz / (A + 1)
 * maximum freq = 66.666 MHz / A
 *
 * Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
 * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
 * The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
 * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
 * = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
 * The sequence consists of 4 complete clock periods, thus the average
 * frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
 * (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
 */

/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED	0
#define TDMMAP_HDLC		1	/* HDLC - packetized */
#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */

/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR	0x04
#define HSS_CONFIG_CORE_CR	0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR	0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR	0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR	0x14
#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT	0x38

/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE		0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD		0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ			0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE		0x50
#define PKT_PIPE_FLOW_DISABLE		0x51
#define PKT_NUM_PIPES_WRITE		0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
#define PKT_PIPE_HDLC_CFG_WRITE		0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
#define PKT_PIPE_RX_SIZE_WRITE		0x56
#define PKT_PIPE_MODE_WRITE		0x57

/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
				     this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT		6 /* abort sequence received */
#define ERR_DISCONNECTING	7 /* disconnect is in progress */

#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct port {
	struct device *dev;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	unsigned int id;
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
	u8 hdlc_cfg;
	u32 clock_reg;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};

/* HDLC packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 status;
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;
#endif
	u32 __reserved1[4];
};

#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

/*****************************************************************************
 * global variables
 ****************************************************************************/

static int ports_open;
static struct dma_pool *dma_pool;
static spinlock_t npe_lock;

static const struct {
	int tx, txdone, rx, rxfree;
} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		   HSS0_PKT_RXFREE0_QUEUE},
		  {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		   HSS1_PKT_RXFREE0_QUEUE},
};

/*****************************************************************************
 * utility functions
 ****************************************************************************/

static inline struct port* dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

/*****************************************************************************
 * HSS access
 ****************************************************************************/

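/* Send a two-word command/config message to the port's NPE; a failed send
 * leaves the driver in an unrecoverable state, hence the BUG(). */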
static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
{
	u32 *val = (u32*)msg;
	if (npe_send_message(port->npe, msg, what)) {
		printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
		       " to %s\n", port->id, val[0], val[1],
		       npe_name(port->npe));
		BUG();
	}
}

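/* Program the TX and RX channel look-up tables: every timeslot is mapped to
 * packetized HDLC, 2 bits per channel, one 32-bit word written per 16
 * channels. */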
static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}

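/* Write the complete port configuration to the NPE: TX/RX port configuration
 * registers, core and clock control, frame size/offset and the channel LUTs,
 * then ask the NPE to load the new configuration. */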
static void hss_config(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = port->clock_reg;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
		       port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}

static void hss_set_hdlc_cfg(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}

static u32 hss_get_status(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
		       port->id);
		BUG();
	}

	return msg.data32;
}

static void hss_start_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}

static void hss_stop_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}

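/* One-time port initialization: load the NPE firmware if it isn't running
 * yet, then configure the packetized (HDLC) pipe: number of pipes, FIFO
 * size, mode, maximum RX frame size and idle pattern. */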
static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}

/*****************************************************************************
 * packetized (HDLC) operation
 ****************************************************************************/

static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
	}
	printk("\n");
#endif
}

static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
#endif
}

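/* Pop a descriptor's physical address from a hardware queue and translate it
 * back into a descriptor index; returns -1 if the queue is empty. */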
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	BUG_ON(phys & 0x1F);
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}

static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}

static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);
	unsigned long flags;

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback) {
		if (carrier)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
	}
	spin_unlock_irqrestore(&npe_lock, flags);
}

static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}

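/* NAPI poll function: receive up to 'budget' frames from the RX queue,
 * recycle the buffers onto the RX-free queue and re-enable the RX interrupt
 * once the queue has been drained. */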
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}

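/* TX-done queue interrupt: reclaim transmitted buffers, update statistics and
 * wake the TX queue if it was stopped waiting for free descriptors. */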
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}

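/* Transmit path: on little-endian CPUs the frame is first copied into a
 * byte-swapped bounce buffer; the buffer is then DMA-mapped and its
 * descriptor pushed onto the TX queue. */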
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb();
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}

static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}

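/* Allocate the coherent descriptor table (from a DMA pool shared by both
 * ports) and the RX buffers, and DMA-map each RX buffer for the NPE. */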
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}

static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}

static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch(parity) {
	case PARITY_CRC16_PR1_CCITT:
		port->hdlc_cfg = 0;
		return 0;

	case PARITY_CRC32_PR1_CCITT:
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;

	default:
		return -EINVAL;
	}
}

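/* Evaluate one (a, b, c) divider combination against the requested rate and
 * remember it if it gives a smaller error than the best found so far. */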
static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
		       u32 *best, u32 *best_diff, u32 *reg)
{
	/* a is 10-bit, b is 10-bit, c is 12-bit */
	u64 new_rate;
	u32 new_diff;

	new_rate = ixp4xx_timer_freq * (u64)(c + 1);
	do_div(new_rate, a * (c + 1) + b + 1);
	new_diff = abs((u32)new_rate - rate);

	if (new_diff < *best_diff) {
		*best = new_rate;
		*best_diff = new_diff;
		*reg = (a << 22) | (b << 12) | c;
	}
	return new_diff;
}

static void find_best_clock(u32 rate, u32 *best, u32 *reg)
{
	u32 a, b, diff = 0xFFFFFFFF;

	a = ixp4xx_timer_freq / rate;

	if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
		check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
		return;
	}
	if (a == 0) { /* > 66.666 MHz */
		a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
		rate = ixp4xx_timer_freq;
	}

	if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
		check_clock(rate, a - 1, 1, 1, best, &diff, reg);
		return;
	}

	for (b = 0; b < 0x400; b++) {
		u64 c = (b + 1) * (u64)rate;
		do_div(c, ixp4xx_timer_freq - rate * a);
		c--;
		if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
			if (b == 0 && /* also try a bit higher rate */
			    !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
				return;
			check_clock(rate, a, b, 0xFFF, best, &diff, reg);
			return;
		}
		if (!check_clock(rate, a, b, c, best, &diff, reg))
			return;
		if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
			return;
	}
}

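/* SIOCWANDEV handler: report or update the interface's clock type, clock rate
 * and loopback setting, reprogramming the HSS if the interface is up. */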
static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = port->clock_rate;
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		if (clk == CLOCK_INT)
			find_best_clock(new_line.clock_rate, &port->clock_rate,
					&port->clock_reg);
		else {
			port->clock_rate = 0;
			port->clock_reg = CLK42X_SPEED_2048KHZ;
		}
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/*****************************************************************************
 * initialization
 ****************************************************************************/

static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
};

static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 0;
	port->clock_reg = CLK42X_SPEED_2048KHZ;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}

static int __devexit hss_remove_one(struct platform_device *pdev)
{
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
	kfree(port);
	return 0;
}

static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};

static int __init hss_init_module(void)
{
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
		return -ENODEV;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}

static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);