/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include "drmP.h"
#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT	HZ
#define FREE_URB_TIMEOUT (HZ*2)
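/*
 * Read the device's vendor-specific USB descriptor and walk its
 * key/length records; currently only the max_area key is used, to
 * record the SKU pixel-mode limit.
 */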
static int udl_parse_vendor_descriptor(struct drm_device *dev,
				       struct usb_device *usbdev)
{
	struct udl_device *udl = dev->dev_private;
	char *desc;
	char *buf;
	char *desc_end;

	u8 total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
			 "%02x %02x %02x %02x %02x %02x %02x\n",
			 total_len, desc[0],
			 desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
			 desc[7], desc[8], desc[9], desc[10]);

		if ((desc[0] != total_len) ||	/* descriptor length */
		    (desc[1] != 0x5f) ||	/* vendor descriptor type */
		    (desc[2] != 0x01) ||	/* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}
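/*
 * Deferred-work helper: releases the URB-limit semaphore from process
 * context (see the fb_defio note in udl_urb_completion()).
 */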
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}
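/*
 * Bulk-write completion handler: logs real errors, returns the URB to
 * the free list and releases a slot on the limit semaphore.
 */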
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}
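/*
 * Wait for every in-flight URB to complete, then free each node's
 * coherent buffer, the URB itself and its list node.
 */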
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at shutdown*/
		ret = down_interruptible(&udl->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&udl->urbs.lock, flags);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&udl->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}
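/*
 * Build the pool of 'count' bulk-write URBs of 'size' bytes each and
 * initialise the semaphore that limits how many may be in flight.
 * Returns the number actually allocated.
 */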
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = dev->dev_private;
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&udl->urbs.lock);

	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER,
					 GFP_KERNEL, &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->ddev->usbdev,
				  usb_sndbulkpipe(udl->ddev->usbdev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		i++;
	}

	sema_init(&udl->urbs.limit_sem, i);
	udl->urbs.count = i;
	udl->urbs.available = i;

	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}
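/*
 * Claim a free URB from the pool, waiting up to GET_URB_TIMEOUT for an
 * in-flight one to complete if none is available. Returns NULL on
 * timeout or interruption.
 */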
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&udl->urbs.lock, flags);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}
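/*
 * Submit a previously claimed URB with 'len' bytes of payload; on
 * failure the URB is recycled via udl_urb_completion().
 */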
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = dev->dev_private;
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}
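/*
 * DRM driver load callback: allocate the udl_device, check the vendor
 * descriptor, set up the URB pool and initialise modesetting and the
 * fbdev emulation.
 */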
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct udl_device *udl;
	int ret;

	udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
	if (!udl)
		return -ENOMEM;

	udl->ddev = dev;
	dev->dev_private = udl;

	if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		ret = -ENOMEM;
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	ret = udl_modeset_init(dev);
	if (ret)
		goto err_urbs;

	ret = udl_fbdev_init(dev);
	if (ret)
		goto err_urbs;

	return 0;

err_urbs:
	udl_free_urb_list(dev);
err:
	kfree(udl);
	DRM_ERROR("%d\n", ret);
	return ret;
}
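/*
 * Called when the USB side goes away (presumably on disconnect) so the
 * URB pool is torn down before the USB device disappears.
 */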
int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}
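/*
 * DRM driver unload callback: tear down the URB pool (if still
 * present), fbdev emulation and modesetting state.
 */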
int udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl);
	return 0;
}