// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/hdlc.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_PKT_BYTES		0

#define DRV_NAME		"ixp4xx_hss"

#define PKT_EXTRA_FLAGS		0 /* orig 1 */
#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */

#define RX_DESCS		16 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT		1000 /* microseconds */

#define FRAME_SIZE		256 /* doesn't matter at this point */
#define FRAME_OFFSET		0
#define MAX_CHANNELS		(FRAME_SIZE / 8)

#define NAPI_WEIGHT		16

#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE	15
#define HSS0_PKT_TX2_QUEUE	16
#define HSS0_PKT_TX3_QUEUE	17
#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE	19
#define HSS0_PKT_RXFREE2_QUEUE	20
#define HSS0_PKT_RXFREE3_QUEUE	21
#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE	10
#define HSS1_PKT_RX_QUEUE	0
#define HSS1_PKT_TX0_QUEUE	5
#define HSS1_PKT_TX1_QUEUE	6
#define HSS1_PKT_TX2_QUEUE	7
#define HSS1_PKT_TX3_QUEUE	8
#define HSS1_PKT_RXFREE0_QUEUE	1
#define HSS1_PKT_RXFREE1_QUEUE	2
#define HSS1_PKT_RXFREE2_QUEUE	3
#define HSS1_PKT_RXFREE3_QUEUE	4
#define HSS1_PKT_TXDONE_QUEUE	9

#define NPE_PKT_MODE_HDLC		0
#define NPE_PKT_MODE_RAW		1
#define NPE_PKT_MODE_56KMODE		2
#define NPE_PKT_MODE_56KENDIAN_MSB	4

/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES	0x1 /* default = flags */
#define PKT_HDLC_CRC_32		0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN	0x4 /* default = LE */
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING		0x08000000
#define PCR_DCLK_EDGE_RISING		0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED		0x01000000

/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE		0x00200000

/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT	0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN			0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN		0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT			0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE		0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH		0x00002000
#define PCR_TX_V56K_HIGH_IMP		0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH		0x00000800
#define PCR_TX_UNASS_HIGH_IMP		0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP		0x00000400

/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA		0x00000100
/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC		0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000

/* default = no loopback */
#define CCR_LOOPBACK			0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS			0x01000000
/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /*65 KHz*/

#define CLK42X_SPEED_512KHZ	(( 130 << 22) | (  2 << 12) |   15)
#define CLK42X_SPEED_1536KHZ	((  43 << 22) | ( 18 << 12) |   47)
#define CLK42X_SPEED_1544KHZ	((  43 << 22) | ( 33 << 12) |  192)
#define CLK42X_SPEED_2048KHZ	((  32 << 22) | ( 34 << 12) |   63)
#define CLK42X_SPEED_4096KHZ	((  16 << 22) | ( 34 << 12) |  127)
#define CLK42X_SPEED_8192KHZ	((   8 << 22) | ( 34 << 12) |  255)

#define CLK46X_SPEED_512KHZ	(( 130 << 22) | ( 24 << 12) |  127)
#define CLK46X_SPEED_1536KHZ	((  43 << 22) | (152 << 12) |  383)
#define CLK46X_SPEED_1544KHZ	((  43 << 22) | ( 66 << 12) |  385)
#define CLK46X_SPEED_2048KHZ	((  32 << 22) | (280 << 12) |  511)
#define CLK46X_SPEED_4096KHZ	((  16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ	((   8 << 22) | (280 << 12) | 2047)
/*
 * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
 *     A (10 bits), B (10 bits) and C (12 bits).
 * IXP42x HSS clock generator operation (verified with an oscilloscope):
 * Each clock bit takes 7.5 ns (1 / 133.xx MHz).
 * The clock sequence consists of (C - B) states of 0s and 1s, each state is
 * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
 * (A + 1) bits wide.
 *
 * The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
 * freq = 66.666 MHz / (A + (B + 1) / (C + 1))
 * minimum freq = 66.666 MHz / (A + 1)
 * maximum freq = 66.666 MHz / A
 *
 * Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
 * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
 * The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
 * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
 * = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
 * The sequence consists of 4 complete clock periods, thus the average
 * frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
 * (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
 */
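/*
 * Illustrative sketch, not part of the original driver or the NPE interface:
 * decode a CLOCK_CR value back into its average output rate using the
 * formula above, freq = 66.666 MHz / (A + (B + 1) / (C + 1)).  It assumes
 * the 66.666 MHz base clock is visible here as ixp4xx_timer_freq, the same
 * symbol check_clock()/find_best_clock() below divide by; the field widths
 * (A:10, B:10, C:12 bits) follow the comment above.
 */
static inline u32 hss_clock_cr_to_rate(u32 reg)
{
	u32 a = (reg >> 22) & 0x3FF;	/* A: main divider, 10 bits */
	u32 b = (reg >> 12) & 0x3FF;	/* B: numerator, 10 bits */
	u32 c = reg & 0xFFF;		/* C: denominator, 12 bits */
	u64 rate = (u64)ixp4xx_timer_freq * (c + 1);

	/* freq = base * (C + 1) / (A * (C + 1) + B + 1) */
	do_div(rate, a * (c + 1) + b + 1);
	return (u32)rate;	/* e.g. CLK42X_SPEED_2048KHZ -> ~2048 kHz */
}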
/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED	0
#define TDMMAP_HDLC		1	/* HDLC - packetized */
#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */

/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR	0x00	/* port configuration registers */
#define HSS_CONFIG_RX_PCR	0x04
#define HSS_CONFIG_CORE_CR	0x08	/* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR	0x0C	/* clock generator control */
#define HSS_CONFIG_TX_FCR	0x10	/* frame configuration registers */
#define HSS_CONFIG_RX_FCR	0x14
#define HSS_CONFIG_TX_LUT	0x18	/* channel look-up tables */
#define HSS_CONFIG_RX_LUT	0x38
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE		0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD		0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ			0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE		0x50
#define PKT_PIPE_FLOW_DISABLE		0x51
#define PKT_NUM_PIPES_WRITE		0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
#define PKT_PIPE_HDLC_CFG_WRITE		0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
#define PKT_PIPE_RX_SIZE_WRITE		0x56
#define PKT_PIPE_MODE_WRITE		0x57
/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
				     this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT		6 /* abort sequence received */
#define ERR_DISCONNECTING	7 /* disconnect is in progress */
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
struct port {
	struct device *dev;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	unsigned int id;
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
	u8 hdlc_cfg;
	u32 clock_reg;
};
/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};
/* HDLC packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 status;
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;
#endif
	u32 __reserved1;	/* multi-fragment virtual pointer */
	u32 __reserved2;	/* multi-fragment virtual pointer */
};
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
/*****************************************************************************
 ****************************************************************************/

static int ports_open;
static struct dma_pool *dma_pool;
static spinlock_t npe_lock;
static const struct {
	int tx, txdone, rx, rxfree;
} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		   HSS0_PKT_RXFREE0_QUEUE},
		  {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		   HSS1_PKT_RXFREE0_QUEUE},
};
/*****************************************************************************
 ****************************************************************************/

static inline struct port *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
/*****************************************************************************
 ****************************************************************************/

static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
	u32 *val = (u32 *)msg;

	if (npe_send_message(port->npe, msg, what)) {
		pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
			port->id, val[0], val[1], npe_name(port->npe));
		BUG();
	}
}
static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}
static void hss_config(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = port->clock_reg;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}
static void hss_set_hdlc_cfg(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}
static u32 hss_get_status(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		pr_crit("HSS-%i: unable to read HSS status\n", port->id);
		BUG();
	}

	return msg.data32;
}
static void hss_start_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}
static void hss_stop_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}
static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}
/*****************************************************************************
 * packetized (HDLC) operation
 ****************************************************************************/
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
	}
	printk("\n");
#endif
}
static inline void debug_desc(u32 phys, struct desc *desc)
{
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
}
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);

	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);
	unsigned long flags;

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback) {
		if (carrier)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
	}
	spin_unlock_irqrestore(&npe_lock, flags);
}
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
				qmgr_disable_irq(rxq);
				continue;
			}
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
				   desc->status, desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");

	return received;	/* not all work done */
}
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) { /* TX-ready queue was empty */
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
			netif_wake_queue(dev);
		}
	}
}
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb();
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
			netif_wake_queue(dev);
		}
	}

	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);

	return NETDEV_TX_OK;
}
static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
*port
)
1014 if (port
->desc_tab
) {
1015 for (i
= 0; i
< RX_DESCS
; i
++) {
1016 struct desc
*desc
= rx_desc_ptr(port
, i
);
1017 buffer_t
*buff
= port
->rx_buff_tab
[i
];
1019 dma_unmap_single(&port
->netdev
->dev
,
1020 desc
->data
, RX_SIZE
,
1025 for (i
= 0; i
< TX_DESCS
; i
++) {
1026 struct desc
*desc
= tx_desc_ptr(port
, i
);
1027 buffer_t
*buff
= port
->tx_buff_tab
[i
];
1029 dma_unmap_tx(port
, desc
);
1033 dma_pool_free(dma_pool
, port
->desc_tab
, port
->desc_tab_phys
);
1034 port
->desc_tab
= NULL
;
1037 if (!ports_open
&& dma_pool
) {
1038 dma_pool_destroy(dma_pool
);
1043 static int hss_hdlc_open(struct net_device
*dev
)
1045 struct port
*port
= dev_to_port(dev
);
1046 unsigned long flags
;
1049 if ((err
= hdlc_open(dev
)))
1052 if ((err
= hss_load_firmware(port
)))
1053 goto err_hdlc_close
;
1055 if ((err
= request_hdlc_queues(port
)))
1056 goto err_hdlc_close
;
1058 if ((err
= init_hdlc_queues(port
)))
1059 goto err_destroy_queues
;
1061 spin_lock_irqsave(&npe_lock
, flags
);
1062 if (port
->plat
->open
)
1063 if ((err
= port
->plat
->open(port
->id
, dev
,
1064 hss_hdlc_set_carrier
)))
1066 spin_unlock_irqrestore(&npe_lock
, flags
);
1068 /* Populate queues with buffers, no failure after this point */
1069 for (i
= 0; i
< TX_DESCS
; i
++)
1070 queue_put_desc(port
->plat
->txreadyq
,
1071 tx_desc_phys(port
, i
), tx_desc_ptr(port
, i
));
1073 for (i
= 0; i
< RX_DESCS
; i
++)
1074 queue_put_desc(queue_ids
[port
->id
].rxfree
,
1075 rx_desc_phys(port
, i
), rx_desc_ptr(port
, i
));
1077 napi_enable(&port
->napi
);
1078 netif_start_queue(dev
);
1080 qmgr_set_irq(queue_ids
[port
->id
].rx
, QUEUE_IRQ_SRC_NOT_EMPTY
,
1081 hss_hdlc_rx_irq
, dev
);
1083 qmgr_set_irq(queue_ids
[port
->id
].txdone
, QUEUE_IRQ_SRC_NOT_EMPTY
,
1084 hss_hdlc_txdone_irq
, dev
);
1085 qmgr_enable_irq(queue_ids
[port
->id
].txdone
);
1089 hss_set_hdlc_cfg(port
);
1092 hss_start_hdlc(port
);
1094 /* we may already have RX data, enables IRQ */
1095 napi_schedule(&port
->napi
);
1099 spin_unlock_irqrestore(&npe_lock
, flags
);
1101 destroy_hdlc_queues(port
);
1102 release_hdlc_queues(port
);
1108 static int hss_hdlc_close(struct net_device
*dev
)
1110 struct port
*port
= dev_to_port(dev
);
1111 unsigned long flags
;
1112 int i
, buffs
= RX_DESCS
; /* allocated RX buffers */
1114 spin_lock_irqsave(&npe_lock
, flags
);
1116 qmgr_disable_irq(queue_ids
[port
->id
].rx
);
1117 netif_stop_queue(dev
);
1118 napi_disable(&port
->napi
);
1120 hss_stop_hdlc(port
);
1122 while (queue_get_desc(queue_ids
[port
->id
].rxfree
, port
, 0) >= 0)
1124 while (queue_get_desc(queue_ids
[port
->id
].rx
, port
, 0) >= 0)
1128 netdev_crit(dev
, "unable to drain RX queue, %i buffer(s) left in NPE\n",
1132 while (queue_get_desc(queue_ids
[port
->id
].tx
, port
, 1) >= 0)
1133 buffs
--; /* cancel TX */
1137 while (queue_get_desc(port
->plat
->txreadyq
, port
, 1) >= 0)
1141 } while (++i
< MAX_CLOSE_WAIT
);
1144 netdev_crit(dev
, "unable to drain TX queue, %i buffer(s) left in NPE\n",
1148 printk(KERN_DEBUG
"Draining TX queues took %i cycles\n", i
);
1150 qmgr_disable_irq(queue_ids
[port
->id
].txdone
);
1152 if (port
->plat
->close
)
1153 port
->plat
->close(port
->id
, dev
);
1154 spin_unlock_irqrestore(&npe_lock
, flags
);
1156 destroy_hdlc_queues(port
);
1157 release_hdlc_queues(port
);
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch (parity) {
	case PARITY_CRC16_PR1_CCITT:
		port->hdlc_cfg = 0;
		return 0;

	case PARITY_CRC32_PR1_CCITT:
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;

	default:
		return -EINVAL;
	}
}
static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
		       u32 *best, u32 *best_diff, u32 *reg)
{
	/* a is 10-bit, b is 10-bit, c is 12-bit */
	u64 new_rate;
	u32 new_diff;

	new_rate = ixp4xx_timer_freq * (u64)(c + 1);
	do_div(new_rate, a * (c + 1) + b + 1);
	new_diff = abs((u32)new_rate - rate);

	if (new_diff < *best_diff) {
		*best = new_rate;
		*best_diff = new_diff;
		*reg = (a << 22) | (b << 12) | c;
	}
	return new_diff;
}
static void find_best_clock(u32 rate, u32 *best, u32 *reg)
{
	u32 a, b, diff = 0xFFFFFFFF;

	a = ixp4xx_timer_freq / rate;

	if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
		check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
		return;
	}
	if (a == 0) { /* > 66.666 MHz */
		a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
		rate = ixp4xx_timer_freq;
	}

	if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
		check_clock(rate, a - 1, 1, 1, best, &diff, reg);
		return;
	}

	for (b = 0; b < 0x400; b++) {
		u64 c = (b + 1) * (u64)rate;
		do_div(c, ixp4xx_timer_freq - rate * a);
		c--;
		if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
			if (b == 0 && /* also try a bit higher rate */
			    !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
				return;
			check_clock(rate, a, b, 0xFFF, best, &diff, reg);
			return;
		}
		if (!check_clock(rate, a, b, c, best, &diff, reg))
			return;
		if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
			return;
	}
}
static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = port->clock_rate;
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		if (clk == CLOCK_INT)
			find_best_clock(new_line.clock_rate, &port->clock_rate,
					&port->clock_reg);
		else {
			port->clock_rate = 0;
			port->clock_reg = CLK42X_SPEED_2048KHZ;
		}
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
/*****************************************************************************
 ****************************************************************************/

static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
};
static int hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 0;
	port->clock_reg = CLK42X_SPEED_2048KHZ;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	netdev_info(dev, "initialized\n");
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}
static int hss_remove_one(struct platform_device *pdev)
{
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	kfree(port);
	return 0;
}
static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};
static int __init hss_init_module(void)
{
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
		return -ENODEV;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}
static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);