/*
 * Driver for PLX NET2272 USB device controller
 *
 * Copyright (C) 2005-2006 PLX Technology, Inc.
 * Copyright (C) 2006-2011 Analog Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include "net2272.h"
#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"

static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;

static const char ep0name[] = "ep0";
static const char * const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c",
};

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

#ifdef CONFIG_USB_GADGET_NET2272_DMA
/*
 * use_dma: the NET2272 can use an external DMA controller.
 * Note that since there is no generic DMA api, some functions,
 * notably request_dma, start_dma, and cancel_dma will need to be
 * modified for your platform's particular dma controller.
 *
 * If use_dma is disabled, pio will be used instead.
 */
static int use_dma = 0;
module_param(use_dma, bool, 0644);
#else
#define use_dma 0
#endif

/*
 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
 * The NET2272 can only use dma for a single endpoint at a time.
 * At some point this could be modified to allow either endpoint
 * to take control of dma as it becomes available.
 *
 * Note that DMA should not be used on OUT endpoints unless it can
 * be guaranteed that no short packets will arrive on an IN endpoint
 * while the DMA operation is pending.  Otherwise the OUT DMA will
 * terminate prematurely (See NET2272 Errata 630-0213-0101)
 */
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);

/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
 *	mode 0 == Slow DREQ mode
 *	mode 1 == Fast DREQ mode
 *	mode 2 == Burst mode
 */
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);

/*
 * fifo_mode: net2272 buffer configuration:
 *	mode 0 == ep-{a,b,c} 512db each
 *	mode 1 == ep-a 1k, ep-{b,c} 512db
 *	mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 *	mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 */
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0644);

/*
 * enable_suspend: When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus powered devices set this to 1.
 */
static ushort enable_suspend = 0;
module_param(enable_suspend, ushort, 0644);
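
/*
 * Example (illustrative only): loading the driver with DMA on ep-a in burst
 * mode and suspend handling enabled might look like
 *
 *	modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 enable_suspend=1
 *
 * The parameter names come from the module_param() declarations above; how
 * the module is named/packaged depends on your kernel configuration.
 */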
static void assert_out_naking(struct net2272_ep *ep, const char *where)
{
	u8 tmp;

	tmp = net2272_ep_read(ep, EP_STAT0);
	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
			ep->ep.name, where, tmp);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
	}
}
#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)

static void stop_out_naking(struct net2272_ep *ep)
{
	u8 tmp = net2272_ep_read(ep, EP_STAT0);

	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
}

#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK: return "bulk";
	case USB_ENDPOINT_XFER_ISOC: return "iso";
	case USB_ENDPOINT_XFER_INT:  return "intr";
	default:                     return "control";
	}
}

static char *buf_state_string(unsigned state)
{
	switch (state) {
	case BUFF_FREE:  return "free";
	case BUFF_VALID: return "valid";
	case BUFF_LCL:   return "local";
	case BUFF_USB:   return "usb";
	default:         return "unknown";
	}
}

static char *dma_mode_string(void)
{
	if (!use_dma)
		return "PIO";
	switch (dma_mode) {
	case 0:  return "SLOW DREQ";
	case 1:  return "FAST DREQ";
	case 2:  return "BURST";
	default: return "invalid";
	}
}

static void net2272_dequeue_all(struct net2272_ep *);
static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
static int net2272_fifo_status(struct usb_ep *);

static struct usb_ep_ops net2272_ep_ops;

/*---------------------------------------------------------------------------*/
static int
net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2272 *dev;
	struct net2272_ep *ep;
	u32 max;
	u8 tmp;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	max = usb_endpoint_maxp(desc) & 0x1fff;

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7fff;
	ep->desc = desc;

	/* net2272_ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;

	/* set speed-dependent max packet */
	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);

	/* set type, direction, address; reset fifo counters */
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
	tmp = usb_endpoint_type(desc);
	if (usb_endpoint_xfer_bulk(desc)) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
	tmp <<= ENDPOINT_TYPE;
	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
	tmp |= (1 << ENDPOINT_ENABLE);

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (!ep->is_in)
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	net2272_ep_write(ep, EP_CFG, tmp);

	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
	net2272_write(dev, IRQENB0, tmp);

	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB);
	net2272_ep_write(ep, EP_IRQENB, tmp);

	tmp = desc->bEndpointAddress;
	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
		type_string(desc->bmAttributes), max,
		net2272_ep_read(ep, EP_CFG));

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
static void net2272_ep_reset(struct net2272_ep *ep)
{
	u8 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep.maxpacket = ~0;
	ep->ep.ops = &net2272_ep_ops;

	/* disable irqs, endpoint */
	net2272_ep_write(ep, EP_IRQENB, 0);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read.
	 */
	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
	net2272_ep_write(ep, EP_RSPSET, tmp);

	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
	if (ep->num != 0)
		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);

	net2272_ep_write(ep, EP_RSPCLR, tmp);

	/* scrub most status bits, and flush any fifo state */
	net2272_ep_write(ep, EP_STAT0,
			(1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));

	net2272_ep_write(ep, EP_STAT1,
			(1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP)
			| (1 << BUFFER_FLUSH));

	/* fifo size is handled separately */
}

static int net2272_disable(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	net2272_dequeue_all(ep);
	net2272_ep_reset(ep);

	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*---------------------------------------------------------------------------*/
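
/*
 * Note: gadget drivers do not call these entry points directly; they go
 * through the usb_ep API (usb_ep_autoconfig()/usb_ep_enable()/
 * usb_ep_disable()), which dispatches here via net2272_ep_ops below.
 */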
static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2272_ep *ep;
	struct net2272_request *req;

	if (!_ep)
		return NULL;
	ep = container_of(_ep, struct net2272_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_ep *ep;
	struct net2272_request *req;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2272_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

static void
net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
{
	struct net2272 *dev;
	unsigned stopped = ep->stopped;

	if (ep->num == 0) {
		if (ep->dev->protocol_stall) {
			ep->stopped = 1;
			set_halt(ep);
		}
		allow_status(ep);
	}

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (use_dma && req->mapped) {
		dma_unmap_single(dev->dev, req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	if (status && status != -ESHUTDOWN)
		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length, req->req.buf);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
static int
net2272_write_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned max)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	u16 *bufp;
	unsigned length, count;
	u8 tmp;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
		ep->ep.name, req, max, length,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	count = length;
	bufp = (u16 *)buf;

	while (likely(count >= 2)) {
		/* no byte-swap required; chip endian set during init */
		writew(*bufp++, ep_data);
		count -= 2;
	}
	buf = (u8 *)bufp;

	/* write final byte by placing the NET2272 into 8-bit mode */
	if (unlikely(count)) {
		tmp = net2272_read(ep->dev, LOCCTL);
		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
		writeb(*buf, ep_data);
		net2272_write(ep->dev, LOCCTL, tmp);
	}
	return length;
}

/* returns: 0: still running, 1: completed, negative: errno */
static int
net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned count, max;
	int status;

	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

	/*
	 * Keep loading the endpoint until the final packet is loaded,
	 * or the endpoint buffer is full.
	 */
 top:
	/*
	 * Clear interrupt status
	 *  - Packet Transmitted interrupt will become set again when the
	 *    host successfully takes another packet
	 */
	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
		buf = req->req.buf + req->req.actual;
		prefetch(buf);

		/* force pagesel */
		net2272_ep_read(ep, EP_STAT0);

		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
			(net2272_ep_read(ep, EP_AVAIL0));

		if (max < ep->ep.maxpacket)
			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
				| (net2272_ep_read(ep, EP_AVAIL0));

		count = net2272_write_packet(ep, buf, req, max);
		/* see if we are done */
		if (req->req.length == req->req.actual) {
			/* validate short or zlp packet */
			if (count < ep->ep.maxpacket)
				set_fifo_bytecount(ep, 0);
			net2272_done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct net2272_request,
						queue);
				status = net2272_kick_dma(ep, req);

				if (status < 0)
					if ((net2272_ep_read(ep, EP_STAT0)
							& (1 << BUFFER_EMPTY)))
						goto top;
			}
			return 1;
		}
		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	}
	return 0;
}
static void
net2272_out_flush(struct net2272_ep *ep)
{
	ASSERT_OUT_NAKING(ep);

	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}

static int
net2272_read_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned avail)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	unsigned is_short;
	u16 *bufp;

	req->req.actual += avail;

	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
		ep->ep.name, req, avail,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	is_short = (avail < ep->ep.maxpacket);

	if (unlikely(avail == 0)) {
		/* remove any zlp from the buffer */
		(void)readw(ep_data);
		return is_short;
	}

	/* Ensure we get the final byte */
	if (unlikely(avail % 2))
		avail++;
	bufp = (u16 *)buf;

	do {
		*bufp++ = readw(ep_data);
		avail -= 2;
	} while (avail);

	/*
	 * To avoid false endpoint available race condition must read
	 * ep stat0 twice in the case of a short transfer
	 */
	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
		net2272_ep_read(ep, EP_STAT0);

	return is_short;
}
static int
net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned is_short;
	int count;
	int tmp;
	int cleanup = 0;
	int status = -1;

	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

 top:
	do {
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);

		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
			| net2272_ep_read(ep, EP_AVAIL0);

		net2272_ep_write(ep, EP_STAT0,
			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
			(1 << DATA_PACKET_RECEIVED_INTERRUPT));

		tmp = req->req.length - req->req.actual;

		if (count > tmp) {
			if ((tmp % ep->ep.maxpacket) != 0) {
				dev_err(ep->dev->dev,
					"%s out fifo %d bytes, expected %d\n",
					ep->ep.name, count, tmp);
				cleanup = 1;
			}
			count = (tmp > 0) ? tmp : 0;
		}

		is_short = net2272_read_packet(ep, buf, req, count);

		/* completion */
		if (unlikely(cleanup || is_short ||
				((req->req.actual == req->req.length)
				&& !req->req.zero))) {

			if (cleanup) {
				net2272_out_flush(ep);
				net2272_done(ep, req, -EOVERFLOW);
			} else
				net2272_done(ep, req, 0);

			/* re-initialize endpoint transfer registers
			 * otherwise they may result in erroneous pre-validation
			 * for subsequent control reads
			 */
			if (unlikely(ep->num == 0)) {
				net2272_ep_write(ep, EP_TRANSFER2, 0);
				net2272_ep_write(ep, EP_TRANSFER1, 0);
				net2272_ep_write(ep, EP_TRANSFER0, 0);
			}

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct net2272_request, queue);
				status = net2272_kick_dma(ep, req);
				if ((status < 0) &&
				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
					goto top;
			}
			return 1;
		}
	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
	return 0;
}

static void
net2272_pio_advance(struct net2272_ep *ep)
{
	struct net2272_request *req;

	if (unlikely(list_empty(&ep->queue)))
		return;

	req = list_entry(ep->queue.next, struct net2272_request, queue);
	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
}

/* returns 0 on success, else negative errno */
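/*
 * The request_dma/start_dma/cancel_dma helpers below only know about the
 * PCI RDK boards; as noted for use_dma above, a custom platform is expected
 * to replace their board-specific branches with calls into its own DMA
 * controller setup.
 */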
static int
net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
	unsigned len, unsigned dir)
{
	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
		ep, buf, len, dir);

	/* The NET2272 only supports a single dma channel */
	if (dev->dma_busy)
		return -EBUSY;
	/*
	 * EP_TRANSFER (used to determine the number of bytes received
	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
	 */
	if ((dir == 1) && (len > 0x1000000))
		return -EINVAL;

	dev->dma_busy = 1;

	/* initialize platform's dma */
	/* NET2272 addr, buffer addr, length, etc. */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		/* Setup PLX 9054 DMA mode */
		writel((1 << LOCAL_BUS_WIDTH) |
			(1 << TA_READY_INPUT_ENABLE) |
			(0 << LOCAL_BURST_ENABLE) |
			(1 << DONE_INTERRUPT_ENABLE) |
			(1 << LOCAL_ADDRESSING_MODE) |
			(1 << DMA_EOT_ENABLE) |
			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
			dev->rdk1.plx9054_base_addr + DMAMODE0);

		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
		writel((dir << DIRECTION_OF_TRANSFER) |
			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
			dev->rdk1.plx9054_base_addr + DMADPR0);
		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
			readl(dev->rdk1.plx9054_base_addr + INTCSR),
			dev->rdk1.plx9054_base_addr + INTCSR);

		break;
	}

	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(1 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((ep >> 1) << DMA_ENDPOINT_SELECT));

	(void) net2272_read(dev, SCRATCH);

	return 0;
}

static void
net2272_start_dma(struct net2272 *dev)
{
	/* start platform's dma controller */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
}

/* returns 0 on success, else negative errno */
static int
net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
{
	unsigned size;
	u8 tmp;

	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
		return -EINVAL;

	/* don't use dma for odd-length transfers
	 * otherwise, we'd need to deal with the last byte with pio
	 */
	if (req->req.length & 1)
		return -EINVAL;

	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
		ep->ep.name, req, (unsigned long long) req->req.dma);

	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	/* The NET2272 can only use DMA on one endpoint at a time */
	if (ep->dev->dma_busy)
		return -EBUSY;

	/* Make sure we only DMA an even number of bytes (we'll use
	 * pio to complete the transfer)
	 */
	size = req->req.length;

	/* device-to-host transfer */
	if (ep->is_in) {
		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;
		req->req.actual += size;

	/* host-to-device transfer */
	} else {
		tmp = net2272_ep_read(ep, EP_STAT0);

		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;

		if (!(tmp & (1 << BUFFER_EMPTY)))
			ep->not_empty = 1;
		else
			ep->not_empty = 0;

		/* allow the endpoint's buffer to fill */
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

		/* this transfer completed and data's already in the fifo
		 * return error so pio gets used.
		 */
		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {

			net2272_write(ep->dev, DMAREQ,
				(0 << DMA_BUFFER_VALID) |
				(0 << DMA_REQUEST_ENABLE) |
				(1 << DMA_CONTROL_DACK) |
				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
				((ep->num >> 1) << DMA_ENDPOINT_SELECT));

			return -EBUSY;
		}
	}

	/* Don't use per-packet interrupts: use dma interrupts only */
	net2272_ep_write(ep, EP_IRQENB, 0);

	net2272_start_dma(ep->dev);

	return 0;
}
static void net2272_cancel_dma(struct net2272 *dev)
{
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
			 (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */

		/* dma abort generates an interrupt */
		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}

	dev->dma_busy = 0;
}

/*---------------------------------------------------------------------------*/
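
/*
 * Caller's view (a gadget driver, not this file) - requests allocated with
 * usb_ep_alloc_request() are submitted through usb_ep_queue() and complete
 * asynchronously, e.g.:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	(a hypothetical callback in the gadget)
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() lands in net2272_queue() below.
 */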
static int
net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2272_request *req;
	struct net2272_ep *ep;
	struct net2272 *dev;
	unsigned long flags;
	int status = -1;
	u8 s;

	req = container_of(_req, struct net2272_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))
		return -EINVAL;
	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* set up dma mapping in case the caller didn't */
	if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
		_ep->name, _req, _req->length, _req->buf,
		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* maybe there's no control data, just status ack */
		if (ep->num == 0 && _req->length == 0) {
			net2272_done(ep, req, 0);
			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
			goto done;
		}

		/* Return zlp, don't let it block subsequent packets */
		s = net2272_ep_read(ep, EP_STAT0);
		if (s & (1 << BUFFER_EMPTY)) {
			/* Buffer is empty check for a blocking zlp, handle it */
			if ((s & (1 << NAK_OUT_PACKETS)) &&
			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
				/*
				 * Request is going to terminate with a short packet ...
				 * hope the client is ready for it!
				 */
				status = net2272_read_fifo(ep, req);
				/* clear short packet naking */
				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
				goto done;
			}
		}

		status = net2272_kick_dma(ep, req);

		if (status < 0) {
			/* dma failed (most likely in use by another endpoint)
			 * fall back to pio
			 */
			status = 0;

			if (ep->is_in)
				status = net2272_write_fifo(ep, req);
			else {
				s = net2272_ep_read(ep, EP_STAT0);
				if ((s & (1 << BUFFER_EMPTY)) == 0)
					status = net2272_read_fifo(ep, req);
			}

			if (unlikely(status != 0)) {
				if (status > 0)
					status = 0;
				req = NULL;
			}
		}
	}
	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue)))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 done:
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* dequeue ALL requests */
static void
net2272_dequeue_all(struct net2272_ep *ep)
{
	struct net2272_request *req;

	/* called with spinlock held */
	ep->stopped = 1;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2272_request,
				queue);
		net2272_done(ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int
net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_ep *ep;
	struct net2272_request *req;
	unsigned long flags;
	int stopped;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;
	ep->stopped = 1;

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete */
	if (ep->queue.next == &req->queue) {
		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
		net2272_done(ep, req, -ECONNRESET);
	}
	req = NULL;
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*---------------------------------------------------------------------------*/
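
/*
 * Halt and wedge are likewise driven through usb_ep_set_halt() and
 * usb_ep_set_wedge().  A wedged endpoint stays halted even if the host
 * issues CLEAR_FEATURE(ENDPOINT_HALT); see the wedged handling in the
 * SETUP decoding further down.
 */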
static int
net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2272_ep *ep;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
		ret = -EAGAIN;
	else {
		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
			value ? "set" : "clear",
			wedged ? "wedge" : "halt");
		/* set/clear */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			ep->wedged = 0;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return ret;
}
static int
net2272_set_halt(struct usb_ep *_ep, int value)
{
	return net2272_set_halt_and_wedge(_ep, value, 0);
}

static int
net2272_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2272_set_halt_and_wedge(_ep, 1, 1);
}

static int
net2272_fifo_status(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	u16 avail;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
	avail |= net2272_ep_read(ep, EP_AVAIL0);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

static void
net2272_fifo_flush(struct usb_ep *_ep)
{
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}
static struct usb_ep_ops net2272_ep_ops = {
	.enable        = net2272_enable,
	.disable       = net2272_disable,

	.alloc_request = net2272_alloc_request,
	.free_request  = net2272_free_request,

	.queue         = net2272_queue,
	.dequeue       = net2272_dequeue,

	.set_halt      = net2272_set_halt,
	.set_wedge     = net2272_set_wedge,
	.fifo_status   = net2272_fifo_status,
	.fifo_flush    = net2272_fifo_flush,
};

/*---------------------------------------------------------------------------*/
static int
net2272_get_frame(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;
	u16 ret;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2272, gadget);
	spin_lock_irqsave(&dev->lock, flags);

	ret = net2272_read(dev, FRAME1) << 8;
	ret |= net2272_read(dev, FRAME0);

	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int
net2272_wakeup(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	u8 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = net2272_read(dev, USBCTL0);
	if (tmp & (1 << IO_WAKEUP_ENABLE))
		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int
net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2272 *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2272, gadget);

	dev->is_selfpowered = value;

	return 0;
}

static int
net2272_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2272 *dev;
	u8 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = net2272_read(dev, USBCTL0);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= (1 << USB_DETECT_ENABLE);
	else
		tmp &= ~(1 << USB_DETECT_ENABLE);
	net2272_write(dev, USBCTL0, tmp);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2272_start(struct usb_gadget_driver *driver,
	int (*bind)(struct usb_gadget *));
static int net2272_stop(struct usb_gadget_driver *driver);

static const struct usb_gadget_ops net2272_ops = {
	.get_frame       = net2272_get_frame,
	.wakeup          = net2272_wakeup,
	.set_selfpowered = net2272_set_selfpowered,
	.pullup          = net2272_pullup,
	.start           = net2272_start,
	.stop            = net2272_stop,
};

/*---------------------------------------------------------------------------*/
static ssize_t
net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2272 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	u8 t1, t2;
	int i;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version %s,"
		"chiprev %02x, locctl %02x\n"
		"irqenb0 %02x irqenb1 %02x "
		"irqstat0 %02x irqstat1 %02x\n",
		driver_name, driver_vers, dev->chiprev,
		net2272_read(dev, LOCCTL),
		net2272_read(dev, IRQENB0),
		net2272_read(dev, IRQENB1),
		net2272_read(dev, IRQSTAT0),
		net2272_read(dev, IRQSTAT1));
	size -= t;
	next += t;

	/* DMA */
	t1 = net2272_read(dev, DMAREQ);
	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
		t1, ep_name[(t1 & 0x01) + 1],
		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
		t1 & (1 << DMA_REQUEST) ? "req " : "",
		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = net2272_read(dev, USBCTL1);
	if (t1 & (1 << VBUS_PIN)) {
		if (t1 & (1 << USB_HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
	} else
		s = "not attached";
	t = scnprintf(next, size,
		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
		net2272_read(dev, USBCTL0), t1,
		net2272_read(dev, OURADDR), s);
	size -= t;
	next += t;

	/* Endpoint Registers */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep;

		ep = &dev->ep[i];
		if (i && !ep->desc)
			continue;

		t1 = net2272_ep_read(ep, EP_CFG);
		t2 = net2272_ep_read(ep, EP_RSPSET);
		t = scnprintf(next, size,
			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
			"irqenb %02x\n",
			ep->ep.name, t1, t2,
			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
			net2272_ep_read(ep, EP_IRQENB));
		size -= t;
		next += t;

		t = scnprintf(next, size,
			"\tstat0 %02x stat1 %02x avail %04x "
			"(ep%d%s-%s)%s\n",
			net2272_ep_read(ep, EP_STAT0),
			net2272_ep_read(ep, EP_STAT1),
			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
			t1 & 0x0f,
			ep->is_in ? "in" : "out",
			type_string(t1 >> 5),
			ep->stopped ? "*" : "");
		size -= t;
		next += t;

		t = scnprintf(next, size,
			"\tep_transfer %06x\n",
			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
		size -= t;
		next += t;

		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
		t = scnprintf(next, size,
			"\tbuf-a %s buf-b %s\n",
			buf_state_string(t1),
			buf_state_string(t2));
		size -= t;
		next += t;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
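
/*
 * Once the device is bound, the controller state can be dumped from
 * userspace via this attribute, e.g. (the exact path depends on how the
 * chip is attached - platform bus, PCI RDK, ...):
 *
 *	cat /sys/devices/.../registers
 */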
/*---------------------------------------------------------------------------*/

static void
net2272_set_fifo_mode(struct net2272 *dev, int mode)
{
	u8 tmp;

	tmp = net2272_read(dev, LOCCTL) & 0x3f;
	tmp |= (mode << 6);
	net2272_write(dev, LOCCTL, tmp);

	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* always ep-a, ep-c ... maybe not ep-b */
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);

	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
		break;
	case 1:
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 1024;
		dev->ep[2].fifo_size = 512;
		break;
	case 2:
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 3:
		dev->ep[1].fifo_size = 1024;
		break;
	}

	/* ep-c is always 2 512 byte buffers */
	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
	dev->ep[3].fifo_size = 512;
}
/*---------------------------------------------------------------------------*/

static struct net2272 *the_controller;

static void
net2272_usb_reset(struct net2272 *dev)
{
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	net2272_cancel_dma(dev);

	net2272_write(dev, IRQENB0, 0);
	net2272_write(dev, IRQENB1, 0);

	/* clear irq state */
	net2272_write(dev, IRQSTAT0, 0xff);
	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));

	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(0 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));

	net2272_cancel_dma(dev);
	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);

	/* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping
	 * note that the higher level gadget drivers are expected to convert data to little endian.
	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
	 */
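	/* e.g., a minimal sketch - only if your local bus actually needs it:
	 *	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << BYTE_SWAP));
	 */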
	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
}
static void
net2272_usb_reinit(struct net2272 *dev)
{
	int i;

	/* basic endpoint init */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;
		ep->not_empty = 0;

		if (use_dma && ep->num == dma_ep)
			ep->dma = 1;

		if (i > 0 && i <= 3)
			ep->fifo_size = 512;
		else
			ep->fifo_size = 64;
		net2272_ep_reset(ep);
	}
	dev->ep[0].ep.maxpacket = 64;

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}

static void
net2272_ep0_start(struct net2272 *dev)
{
	struct net2272_ep *ep0 = &dev->ep[0];

	net2272_ep_write(ep0, EP_RSPSET,
		(1 << NAK_OUT_PACKETS_MODE) |
		(1 << ALT_NAK_OUT_PACKETS));
	net2272_ep_write(ep0, EP_RSPCLR,
		(1 << HIDE_STATUS_PHASE) |
		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
	net2272_write(dev, USBCTL0,
		(dev->softconnect << USB_DETECT_ENABLE) |
		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
		(1 << IO_WAKEUP_ENABLE));
	net2272_write(dev, IRQENB0,
		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
		(1 << DMA_DONE_INTERRUPT_ENABLE));
	net2272_write(dev, IRQENB1,
		(1 << VBUS_INTERRUPT_ENABLE) |
		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
}
/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
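/*
 * Illustrative only: a gadget driver binds to this UDC through the gadget
 * core, e.g.
 *
 *	usb_gadget_probe_driver(&my_gadget_driver, my_bind);
 *
 * where "my_gadget_driver" and "my_bind" are hypothetical names owned by the
 * gadget driver; the core then calls net2272_start() below.
 */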
static int net2272_start(struct usb_gadget_driver *driver,
	int (*bind)(struct usb_gadget *))
{
	struct net2272 *dev = the_controller;
	unsigned i;
	int ret;

	if (!driver || !bind || !driver->unbind || !driver->setup ||
	    driver->speed != USB_SPEED_HIGH)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	for (i = 0; i < 4; ++i)
		dev->ep[i].irqs = 0;
	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;
	ret = bind(&dev->gadget);
	if (ret) {
		dev_dbg(dev->dev, "bind to driver %s --> %d\n",
			driver->driver.name, ret);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return ret;
	}

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2272_ep0_start(dev);

	dev_dbg(dev->dev, "%s ready\n", driver->driver.name);

	return 0;
}

static void
stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	net2272_usb_reset(dev);
	for (i = 0; i < 4; ++i)
		net2272_dequeue_all(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	net2272_usb_reinit(dev);
}

static int net2272_stop(struct usb_gadget_driver *driver)
{
	struct net2272 *dev = the_controller;
	unsigned long flags;

	if (!driver || driver != dev->driver)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	net2272_pullup(&dev->gadget, 0);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
	return 0;
}
/*---------------------------------------------------------------------------*/
/* handle ep-a/ep-b dma completions */
static void
net2272_handle_dma(struct net2272_ep *ep)
{
	struct net2272_request *req;
	unsigned len;
	int status;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct net2272_request, queue);
	else
		req = NULL;

	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);

	/* Ensure DREQ is de-asserted */
	net2272_write(ep->dev, DMAREQ,
		(0 << DMA_BUFFER_VALID)
		| (0 << DMA_REQUEST_ENABLE)
		| (1 << DMA_CONTROL_DACK)
		| (ep->dev->dma_eot_polarity << EOT_POLARITY)
		| (ep->dev->dma_dack_polarity << DACK_POLARITY)
		| (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
		| ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));

	ep->dev->dma_busy = 0;

	net2272_ep_write(ep, EP_IRQENB,
		(1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB));

	/* device-to-host transfer completed */
	if (ep->is_in) {
		/* validate a short packet or zlp if necessary */
		if ((req->req.length % ep->ep.maxpacket != 0) ||
				req->req.zero)
			set_fifo_bytecount(ep, 0);

		net2272_done(ep, req, 0);
		if (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct net2272_request, queue);
			status = net2272_kick_dma(ep, req);
			if (status < 0)
				net2272_pio_advance(ep);
		}

	/* host-to-device transfer completed */
	} else {
		/* terminated with a short packet? */
		if (net2272_read(ep->dev, IRQSTAT0) &
				(1 << DMA_DONE_INTERRUPT)) {
			/* abort system dma */
			net2272_cancel_dma(ep->dev);
		}

		/* EP_TRANSFER will contain the number of bytes
		 * actually received.
		 * NOTE: There is no overflow detection on EP_TRANSFER:
		 * We can't deal with transfers larger than 2^24 bytes!
		 */
		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
			| (net2272_ep_read(ep, EP_TRANSFER0));

		req->req.actual += len;

		/* get any remaining data */
		net2272_pio_advance(ep);
	}
}

/*---------------------------------------------------------------------------*/

static void
net2272_handle_ep(struct net2272_ep *ep)
{
	struct net2272_request *req;
	u8 stat0, stat1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2272_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	stat0 = net2272_ep_read(ep, EP_STAT0);
	stat1 = net2272_ep_read(ep, EP_STAT1);
	ep->irqs++;

	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
		ep->ep.name, stat0, stat1, req ? &req->req : 0);

	net2272_ep_write(ep, EP_STAT0, stat0 &
		~((1 << NAK_OUT_PACKETS)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
	net2272_ep_write(ep, EP_STAT1, stat1);

	/* data packet(s) received (in the fifo, OUT)
	 * direction must be validated, otherwise control read status phase
	 * could be interpreted as a valid packet
	 */
	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
		net2272_pio_advance(ep);
	/* data packet(s) transmitted (IN) */
	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
		net2272_pio_advance(ep);
}

static struct net2272_ep *
net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
{
	struct net2272_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return &dev->ep[0];

	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 bEndpointAddress;

		if (!ep->desc)
			continue;

		bEndpointAddress = ep->desc->bEndpointAddress;
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
			return ep;
	}
	return NULL;
}
/*
 * USB Test Packet:
 * JJJJJJJKKKKKKK * 8
 * {JKKKKKKK * 10}, JK
 */
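/* This is the TEST_PACKET payload defined by the USB 2.0 specification for
 * the high-speed Test_Packet test mode, selected via SET_FEATURE below.
 */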
static const u8 net2272_test_packet[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
};

static void
net2272_set_test_mode(struct net2272 *dev, int mode)
{
	int i;

	/* Disable all net2272 interrupts:
	 * Nothing but a power cycle should stop the test.
	 */
	net2272_write(dev, IRQENB0, 0x00);
	net2272_write(dev, IRQENB1, 0x00);

	/* Force transceiver to high-speed */
	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);

	net2272_write(dev, PAGESEL, 0);
	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
	net2272_write(dev, EP_RSPCLR,
			(1 << CONTROL_STATUS_PHASE_HANDSHAKE)
			| (1 << HIDE_STATUS_PHASE));
	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);

	/* wait for status phase to complete */
	while (!(net2272_read(dev, EP_STAT0) &
				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
		;

	/* Enable test mode */
	net2272_write(dev, USBTEST, mode);

	/* load test packet */
	if (mode == TEST_PACKET) {
		/* switch to 8 bit mode */
		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
				~(1 << DATA_WIDTH));

		for (i = 0; i < sizeof(net2272_test_packet); ++i)
			net2272_write(dev, EP_DATA, net2272_test_packet[i]);

		/* Validate test packet */
		net2272_write(dev, EP_TRANSFER0, 0);
	}
}
static void
net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
{
	struct net2272_ep *ep;
	u8 num, scratch;

	/* starting a control request? */
	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u8 raw[8];
			struct usb_ctrlrequest r;
		} u;
		int tmp = 0;
		struct net2272_request *req;

		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			dev_dbg(dev->dev, "%s speed\n",
				(dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full");
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover interrupt state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			net2272_done(ep, req,
				(req->req.actual == req->req.length) ? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		net2272_ep_write(ep, EP_STAT0,
			  (1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
		net2272_ep_write(ep, EP_STAT1,
			  (1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP));

		/*
		 * Ensure Control Read pre-validation setting is beyond maximum size
		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
		 *    an EP0 transfer following the Control Write is a Control Read,
		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
		 *    pre-validation count.
		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
		 *    the pre-validation count cannot cause an unexpected validation
		 */
		net2272_write(dev, PAGESEL, 0);
		net2272_write(dev, EP_TRANSFER2, 0xff);
		net2272_write(dev, EP_TRANSFER1, 0xff);
		net2272_write(dev, EP_TRANSFER0, 0xff);

		u.raw[0] = net2272_read(dev, SETUP0);
		u.raw[1] = net2272_read(dev, SETUP1);
		u.raw[2] = net2272_read(dev, SETUP2);
		u.raw[3] = net2272_read(dev, SETUP3);
		u.raw[4] = net2272_read(dev, SETUP4);
		u.raw[5] = net2272_read(dev, SETUP5);
		u.raw[6] = net2272_read(dev, SETUP6);
		u.raw[7] = net2272_read(dev, SETUP7);
		/*
		 * If you have a big endian cpu make sure le16_to_cpus
		 * performs the proper byte swapping here...
		 */
		le16_to_cpus(&u.r.wValue);
		le16_to_cpus(&u.r.wIndex);
		le16_to_cpus(&u.r.wLength);

		/* ack the irq */
		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status phase happen.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
			stop_out_naking(ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
		net2272_ep_write(ep, EP_IRQENB, scratch);

		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			goto delegate;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2272_ep *e;
			u16 status = 0;

			switch (u.r.bRequestType & USB_RECIP_MASK) {
			case USB_RECIP_ENDPOINT:
				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
				if (!e || u.r.wLength > 2)
					goto do_stall;
				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
					status = __constant_cpu_to_le16(1);
				else
					status = __constant_cpu_to_le16(0);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "%s stat %02x\n",
					ep->ep.name, status);
				goto next_endpoints;
			case USB_RECIP_DEVICE:
				if (u.r.wLength > 2)
					goto do_stall;
				if (dev->is_selfpowered)
					status = (1 << USB_DEVICE_SELF_POWERED);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "device stat %02x\n", status);
				goto next_endpoints;
			case USB_RECIP_INTERFACE:
				if (u.r.wLength > 2)
					goto do_stall;

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "interface status %02x\n", status);
				goto next_endpoints;
			}
			break;
		}
		case USB_REQ_CLEAR_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			if (e->wedged) {
				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
					ep->ep.name);
			} else {
				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
				clear_halt(e);
			}
			allow_status(ep);
			goto next_endpoints;
		}
		case USB_REQ_SET_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType == USB_RECIP_DEVICE) {
				if (u.r.wIndex != NORMAL_OPERATION)
					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
				allow_status(ep);
				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
				goto next_endpoints;
			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			set_halt(e);
			allow_status(ep);
			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
		}
		case USB_REQ_SET_ADDRESS: {
			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
			allow_status(ep);
			break;
		}
		default:
 delegate:
			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				u.r.wValue, u.r.wIndex,
				net2272_ep_read(ep, EP_CFG));
			spin_unlock(&dev->lock);
			tmp = dev->driver->setup(&dev->gadget, &u.r);
			spin_lock(&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
 do_stall:
			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
				u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}
	/* endpoint dma irq? */
	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
		net2272_cancel_dma(dev);
		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
		stat &= ~(1 << DMA_DONE_INTERRUPT);
		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
			? 2 : 1;

		ep = &dev->ep[num];
		net2272_handle_dma(ep);
	}

 next_endpoints:
	/* endpoint data irq? */
	scratch = stat & 0x0f;
	stat &= ~0x0f;
	for (num = 0; scratch; num++) {
		u8 t;

		/* does this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep[num];
		net2272_handle_ep(ep);
	}

	/* some interrupts we can just ignore */
	stat &= ~(1 << SOF_INTERRUPT);

	if (stat)
		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
}
static void
net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
{
	u8 tmp, mask;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);

	if (stat & tmp) {
		net2272_write(dev, IRQSTAT1, tmp);
		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
				((net2272_read(dev, USBCTL1) & mask) == 0))
			|| ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
				== 0))
			&& (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			dev_dbg(dev->dev, "disconnect %s\n",
				dev->driver->driver.name);
			stop_activity(dev, dev->driver);
			net2272_ep0_start(dev);
			return;
		}
		stat &= ~tmp;

		if (!stat)
			return;
	}

	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		net2272_write(dev, IRQSTAT1, tmp);
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			if (!enable_suspend) {
				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
			}
		} else {
			if (dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		net2272_write(dev, IRQSTAT1, stat);

	/* some status we can just ignore */
	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			| (1 << SUSPEND_REQUEST_INTERRUPT)
			| (1 << RESUME_INTERRUPT));
	if (!stat)
		return;

	dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
}
static irqreturn_t net2272_irq(int irq, void *_dev)
{
	struct net2272 *dev = _dev;
#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
	u32 intcsr;
#endif
#if defined(PLX_PCI_RDK)
	u8 dmareq;
#endif
	spin_lock(&dev->lock);
#if defined(PLX_PCI_RDK)
	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);

	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
	}
	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
			dev->rdk1.plx9054_base_addr + DMACSR0);

		dmareq = net2272_read(dev, DMAREQ);
		if (dmareq & 0x01)
			net2272_handle_dma(&dev->ep[2]);
		else
			net2272_handle_dma(&dev->ep[1]);
	}
#endif
#if defined(PLX_PCI_RDK2)
	/* see if PCI int for us by checking irqstat */
	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
		spin_unlock(&dev->lock);
		return IRQ_NONE;
	}
	/* check dma interrupts */
#endif
	/* Platform/device interrupt handler */
#if !defined(PLX_PCI_RDK)
	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
#endif
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static int net2272_present(struct net2272 *dev)
{
	/*
	 * Quick test to see if CPU can communicate properly with the NET2272.
	 * Verifies connection using writes and reads to write/read and
	 * read-only registers.
	 *
	 * This routine is strongly recommended especially during early bring-up
	 * of new hardware, however for designs that do not apply Power On System
	 * Tests (POST) it may be discarded (or perhaps minimized).
	 */
	unsigned int ii;
	u8 val, refval;

	/* Verify NET2272 write/read SCRATCH register can write and read */
	refval = net2272_read(dev, SCRATCH);
	for (ii = 0; ii < 0x100; ii += 7) {
		net2272_write(dev, SCRATCH, ii);
		val = net2272_read(dev, SCRATCH);
		if (val != ii) {
			dev_dbg(dev->dev,
				"%s: write/read SCRATCH register test failed: "
				"wrote:0x%2.2x, read:0x%2.2x\n",
				__func__, ii, val);
			return -EINVAL;
		}
	}
	/* To be nice, we write the original SCRATCH value back: */
	net2272_write(dev, SCRATCH, refval);

	/* Verify NET2272 CHIPREV register is read-only: */
	refval = net2272_read(dev, CHIPREV_2272);
	for (ii = 0; ii < 0x100; ii += 7) {
		net2272_write(dev, CHIPREV_2272, ii);
		val = net2272_read(dev, CHIPREV_2272);
		if (val != refval) {
			dev_dbg(dev->dev,
				"%s: write/read CHIPREV register test failed: "
				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
				__func__, ii, val, refval);
			return -EINVAL;
		}
	}

	/*
	 * Verify NET2272's "NET2270 legacy revision" register
	 *  - NET2272 has two revision registers. The NET2270 legacy revision
	 *    register should read the same value, regardless of the NET2272
	 *    silicon revision. The legacy register applies to NET2270
	 *    firmware being applied to the NET2272.
	 */
	val = net2272_read(dev, CHIPREV_LEGACY);
	if (val != NET2270_LEGACY_REV) {
		/*
		 * Unexpected legacy revision value
		 *  - Perhaps the chip is a NET2270?
		 */
		dev_dbg(dev->dev,
			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
			__func__, NET2270_LEGACY_REV, val);
		return -EINVAL;
	}

	/*
	 * Verify NET2272 silicon revision
	 *  - This revision register is appropriate for the silicon version
	 *    of the NET2272
	 */
	val = net2272_read(dev, CHIPREV_2272);
	switch (val) {
	case CHIPREV_NET2272_R1:
		/*
		 * NET2272 Rev 1 has DMA related errata:
		 *  - Newer silicon (Rev 1A or better) required
		 */
		dev_dbg(dev->dev,
			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
			__func__);
		break;
	case CHIPREV_NET2272_R1A:
		break;
	default:
		/* NET2272 silicon version *may* not work with this firmware */
		dev_dbg(dev->dev,
			"%s: unexpected silicon revision register value: "
			" CHIPREV_2272: 0x%2.2x\n",
			__func__, val);
		/*
		 * Return Success, even though the chip rev is not an expected value
		 *  - Older, pre-built firmware can attempt to operate on newer silicon
		 *  - Often, new silicon is perfectly compatible
		 */
	}

	/* Success: NET2272 checks out OK */
	return 0;
}
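
/* net2272_probe_fin() below runs this check before programming the chip and
 * aborts the probe ("2272 not found!") if it fails.
 */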
2213 net2272_gadget_release(struct device
*_dev
)
2215 struct net2272
*dev
= dev_get_drvdata(_dev
);
2219 /*---------------------------------------------------------------------------*/

static void __devexit
net2272_remove(struct net2272 *dev)
{
	usb_del_gadget_udc(&dev->gadget);

	/* start with the driver above us */
	if (dev->driver) {
		/* should have been done already by driver model core */
		dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
			dev->driver->driver.name);
		usb_gadget_unregister_driver(dev->driver);
	}

	free_irq(dev->irq, dev);
	iounmap(dev->base_addr);

	device_unregister(&dev->gadget.dev);
	device_remove_file(dev->dev, &dev_attr_registers);

	dev_info(dev->dev, "unbind\n");
	the_controller = NULL;
}

static struct net2272 * __devinit
net2272_probe_init(struct device *dev, unsigned int irq)
{
	struct net2272 *ret;

	if (the_controller) {
		dev_warn(dev, "ignoring\n");
		return ERR_PTR(-EBUSY);
	}
	if (!irq) {
		dev_dbg(dev, "No IRQ!\n");
		return ERR_PTR(-ENODEV);
	}

	/* alloc, and start init */
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	ret->irq = irq;
	ret->dev = dev;
	ret->gadget.ops = &net2272_ops;
	ret->gadget.is_dualspeed = 1;

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&ret->gadget.dev, "gadget");
	ret->gadget.dev.parent = dev;
	ret->gadget.dev.dma_mask = dev->dma_mask;
	ret->gadget.dev.release = net2272_gadget_release;
	ret->gadget.name = driver_name;

	return ret;
}

static int __devinit
net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
{
	int ret;

	/* See if there... */
	if (net2272_present(dev)) {
		dev_warn(dev->dev, "2272 not found!\n");
		ret = -ENODEV;
		goto err;
	}

	net2272_usb_reset(dev);
	net2272_usb_reinit(dev);

	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
	if (ret) {
		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
		goto err;
	}

	dev->chiprev = net2272_read(dev, CHIPREV_2272);

	/* done */
	dev_info(dev->dev, "%s\n", driver_desc);
	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
		dev->irq, dev->base_addr, dev->chiprev,
		dma_mode_string());
	dev_info(dev->dev, "version: %s\n", driver_vers);

	the_controller = dev;

	ret = device_register(&dev->gadget.dev);
	if (ret)
		goto err_irq;
	ret = device_create_file(dev->dev, &dev_attr_registers);
	if (ret)
		goto err_dev_reg;

	ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (ret)
		goto err_add_udc;

	return 0;

 err_add_udc:
	device_remove_file(dev->dev, &dev_attr_registers);
 err_dev_reg:
	device_unregister(&dev->gadget.dev);
 err_irq:
	free_irq(dev->irq, dev);
 err:
	return ret;
}

#ifdef CONFIG_PCI

/*
 * wrap this driver around the specified device, but
 * don't respond over USB until a gadget driver binds to us
 */

static int __devinit
net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
{
	unsigned long resource, len, tmp;
	void __iomem *mem_mapped_addr[4];
	int ret, i;

	/*
	 * BAR 0 holds PLX 9054 config registers
	 * BAR 1 is i/o memory; unused here
	 * BAR 2 holds EPLD config registers
	 * BAR 3 holds NET2272 registers
	 */

	/* Find and map all address spaces */
	for (i = 0; i < 4; ++i) {
		if (i == 1)
			continue;	/* BAR1 unused */

		resource = pci_resource_start(pdev, i);
		len = pci_resource_len(pdev, i);

		if (!request_mem_region(resource, len, driver_name)) {
			dev_dbg(dev->dev, "controller already in use\n");
			ret = -EBUSY;
			goto err;
		}

		mem_mapped_addr[i] = ioremap_nocache(resource, len);
		if (mem_mapped_addr[i] == NULL) {
			release_mem_region(resource, len);
			dev_dbg(dev->dev, "can't map memory\n");
			ret = -EFAULT;
			goto err;
		}
	}

	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
	dev->base_addr = mem_mapped_addr[3];

	/* Set PLX 9054 bus width (16 bits) */
	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
		dev->rdk1.plx9054_base_addr + LBRD1);

	/* Enable PLX 9054 Interrupts */
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
		(1 << PCI_INTERRUPT_ENABLE) |
		(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);

	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
		dev->rdk1.plx9054_base_addr + DMACSR0);

	writeb((1 << EPLD_DMA_ENABLE) |
		(1 << DMA_CTL_DACK) |
		(1 << DMA_TIMEOUT_ENABLE) |
		(1 << NET2272_RESET),
		dev->base_addr + EPLD_IO_CONTROL_REGISTER);

	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
		~(1 << NET2272_RESET),
		dev->base_addr + EPLD_IO_CONTROL_REGISTER);

	return 0;

 err:
	while (--i >= 0) {
		if (i == 1)
			continue;	/* BAR1 was never mapped */
		iounmap(mem_mapped_addr[i]);
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}

	return ret;
}

static int __devinit
net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
{
	unsigned long resource, len;
	void __iomem *mem_mapped_addr[2];
	int ret, i;

	/*
	 * BAR 0 holds FPGA config registers
	 * BAR 1 holds NET2272 registers
	 */

	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
	for (i = 0; i < 2; ++i) {
		resource = pci_resource_start(pdev, i);
		len = pci_resource_len(pdev, i);

		if (!request_mem_region(resource, len, driver_name)) {
			dev_dbg(dev->dev, "controller already in use\n");
			ret = -EBUSY;
			goto err;
		}

		mem_mapped_addr[i] = ioremap_nocache(resource, len);
		if (mem_mapped_addr[i] == NULL) {
			release_mem_region(resource, len);
			dev_dbg(dev->dev, "can't map memory\n");
			ret = -EFAULT;
			goto err;
		}
	}

	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
	dev->base_addr = mem_mapped_addr[1];

	/* Set 2272 bus width (16 bits) and reset */
	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
	/* Print fpga version number */
	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
	/* Enable FPGA Interrupts */
	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);

	return 0;

 err:
	while (--i >= 0) {
		iounmap(mem_mapped_addr[i]);
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}

	return ret;
}

static int __devinit
net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2272 *dev;
	int ret;

	dev = net2272_probe_init(&pdev->dev, pdev->irq);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	dev->dev_id = pdev->device;

	if (pci_enable_device(pdev) < 0) {
		ret = -ENODEV;
		goto err_free;
	}

	pci_set_master(pdev);

	switch (pdev->device) {
	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
	default: BUG();
	}
	if (ret)
		goto err_pci;

	ret = net2272_probe_fin(dev, 0);
	if (ret)
		goto err_pci;

	pci_set_drvdata(pdev, dev);

	return 0;

 err_pci:
	pci_disable_device(pdev);
 err_free:
	kfree(dev);

	return ret;
}

static void __devexit
net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
{
	int i;

	/* disable PLX 9054 interrupts */
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
		~(1 << PCI_INTERRUPT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);

	/* clean up resources allocated during probe() */
	iounmap(dev->rdk1.plx9054_base_addr);
	iounmap(dev->rdk1.epld_base_addr);

	for (i = 0; i < 4; ++i) {
		if (i == 1)
			continue;	/* BAR1 unused */
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}
}

static void __devexit
net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
{
	int i;

	/* disable fpga interrupts
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
		~(1 << PCI_INTERRUPT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);
	*/

	/* clean up resources allocated during probe() */
	iounmap(dev->rdk2.fpga_base_addr);

	for (i = 0; i < 2; ++i)
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
}

static void __devexit
net2272_pci_remove(struct pci_dev *pdev)
{
	struct net2272 *dev = pci_get_drvdata(pdev);

	net2272_remove(dev);

	switch (pdev->device) {
	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
	default: BUG();
	}

	pci_disable_device(pdev);

	kfree(dev);
}

/* Table of matching PCI IDs */
static struct pci_device_id __devinitdata pci_ids[] = {
	{
		.class     = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
		.vendor    = PCI_VENDOR_ID_PLX,
		.device    = PCI_DEVICE_ID_RDK1,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.class     = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
		.vendor    = PCI_VENDOR_ID_PLX,
		.device    = PCI_DEVICE_ID_RDK2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
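
/* PCI glue for the PLX RDK1/RDK2 evaluation boards matched above */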

static struct pci_driver net2272_pci_driver = {
	.name     = driver_name,
	.id_table = pci_ids,

	.probe    = net2272_pci_probe,
	.remove   = __devexit_p(net2272_pci_remove),
};

static int net2272_pci_register(void)
{
	return pci_register_driver(&net2272_pci_driver);
}

static void net2272_pci_unregister(void)
{
	pci_unregister_driver(&net2272_pci_driver);
}

#else
static inline int net2272_pci_register(void) { return 0; }
static inline void net2272_pci_unregister(void) { }
#endif

/*---------------------------------------------------------------------------*/

static int __devinit
net2272_plat_probe(struct platform_device *pdev)
{
	struct net2272 *dev;
	int ret;
	unsigned int irqflags;
	resource_size_t base, len;
	struct resource *iomem, *iomem_bus, *irq_res;

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
	if (!irq_res || !iomem) {
		dev_err(&pdev->dev, "must provide irq/base addr");
		return -EINVAL;
	}

	dev = net2272_probe_init(&pdev->dev, irq_res->start);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	irqflags = 0;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irqflags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irqflags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irqflags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irqflags |= IRQF_TRIGGER_LOW;

	base = iomem->start;
	len = resource_size(iomem);
	if (iomem_bus)
		dev->base_shift = iomem_bus->start;

	if (!request_mem_region(base, len, driver_name)) {
		dev_dbg(dev->dev, "get request memory region!\n");
		ret = -EBUSY;
		goto err;
	}
	dev->base_addr = ioremap_nocache(base, len);
	if (!dev->base_addr) {
		dev_dbg(dev->dev, "can't map memory\n");
		ret = -EFAULT;
		goto err_req;
	}

	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
	if (ret)
		goto err_io;

	platform_set_drvdata(pdev, dev);
	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");

	the_controller = dev;

	return 0;

 err_io:
	iounmap(dev->base_addr);
 err_req:
	release_mem_region(base, len);
 err:
	return ret;
}

static int __devexit
net2272_plat_remove(struct platform_device *pdev)
{
	struct net2272 *dev = platform_get_drvdata(pdev);

	net2272_remove(dev);

	release_mem_region(pdev->resource[0].start,
		resource_size(&pdev->resource[0]));

	kfree(dev);

	return 0;
}

static struct platform_driver net2272_plat_driver = {
	.probe   = net2272_plat_probe,
	.remove  = __devexit_p(net2272_plat_remove),
	.driver  = {
		.name  = driver_name,
		.owner = THIS_MODULE,
	},
	/* FIXME .suspend, .resume */
};
MODULE_ALIAS("platform:net2272");
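
/*
 * Example platform glue (a minimal sketch, not taken from any real board
 * file): net2272_plat_probe() looks up one IORESOURCE_MEM resource for the
 * register window, one IORESOURCE_IRQ resource (whose flags select the
 * trigger type), and an optional IORESOURCE_BUS resource that becomes
 * dev->base_shift.  The device name must be "net2272" to bind to this
 * driver.  All names, addresses and the IRQ number below are illustrative
 * assumptions only:
 *
 *	static struct resource board_net2272_resources[] = {
 *		[0] = {		// NET2272 register window; kept first because
 *				// net2272_plat_remove() releases resource[0]
 *			.start = 0x20300000,
 *			.end   = 0x20300000 + 0xff,
 *			.flags = IORESOURCE_MEM,
 *		},
 *		[1] = {		// controller interrupt, low-level triggered
 *			.start = 64,
 *			.end   = 64,
 *			.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 *		},
 *		[2] = {		// optional local-bus address shift
 *			.start = 1,
 *			.end   = 1,
 *			.flags = IORESOURCE_BUS,
 *		},
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name          = "net2272",
 *		.id            = -1,
 *		.num_resources = ARRAY_SIZE(board_net2272_resources),
 *		.resource      = board_net2272_resources,
 *	};
 *
 * The board init code would then call
 * platform_device_register(&board_net2272_device).
 */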

static int __init
net2272_init(void)
{
	int ret;

	ret = net2272_pci_register();
	if (ret)
		return ret;
	ret = platform_driver_register(&net2272_plat_driver);
	if (ret)
		goto err_pci;

	return ret;

 err_pci:
	net2272_pci_unregister();

	return ret;
}
module_init(net2272_init);

static void __exit
net2272_cleanup(void)
{
	net2272_pci_unregister();
	platform_driver_unregister(&net2272_plat_driver);
}
module_exit(net2272_cleanup);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("PLX Technology, Inc.");
MODULE_LICENSE("GPL");