/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <linux/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)

#define MAX_SPEED 0xffff
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
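/*
 * Illustrative note (not in the original source): get_zone() rounds a sector
 * down to the start of its packet-sized zone. With the common 32-sector
 * (16kB) packet and offset 0, sectors 0..31 map to zone 0, sectors 32..63
 * map to zone 32, and so on. The bitmask trick assumes pd->settings.size is
 * a power of two.
 */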
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}
/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};
static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}
static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
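/*
 * Worked example (illustrative, not in the original source): with
 * congestion_on = 1000, a requested congestion_off of 990 is clamped to
 * 900 (hi - 100), keeping at least a 100-bio gap between the two marks so
 * the queue does not flap between the congested and uncongested states.
 */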
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}
/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					  size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static struct class_attribute class_pktcdvd_attrs[] = {
 __ATTR(add,            0200, NULL, class_pktcdvd_store_add),
 __ATTR(remove,         0200, NULL, class_pktcdvd_store_remove),
 __ATTR(device_map,     0444, class_pktcdvd_show_map, NULL),
 __ATTR_NULL
};
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}
static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}
/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}
/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
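/*
 * Illustrative note (not in the original source): with CD_FRAMESIZE == 2048,
 * FRAMES_PER_PAGE is PAGE_SIZE / CD_FRAMESIZE (two frames per 4kB page), so
 * a 32-frame (64kB) packet needs 16 pages, one read bio per frame, and a
 * single write bio covering the whole packet.
 */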
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}
static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      __GFP_RECLAIM);
		if (ret)
			goto out;
	}

	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (scsi_req(rq)->result)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
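/*
 * Typical call pattern (illustrative sketch, not in the original source):
 * the caller fills a packet_command via init_cdrom_command(), sets cgc.cmd[]
 * to a GPCMD_* opcode plus parameters, and checks the return value, e.g.:
 *
 *	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
 *	cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
 *	ret = pkt_generic_packet(pd, &cgc);
 *
 * GPCMD_TEST_UNIT_READY is used here only as an example; this file issues
 * FLUSH CACHE, SET SPEED and MODE SENSE/SELECT the same way below.
 */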
static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sense->sense_key, sense->asc, sense->ascq,
			sense_key_string(sense->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);

	return ret;
}
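/*
 * Unit note (illustrative, not in the original source): GPCMD_SET_SPEED
 * takes speeds in kB/s, not in "x" factors; 1x CD is roughly 176 kB/s, so a
 * 4x request is issued as about 4 * 177 = 708 in cmd[2..3]/cmd[4..5].
 * MAX_SPEED (0xffff) asks the drive for its maximum.
 */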
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
			    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}

		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
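/*
 * Worked example (illustrative, not in the original source): a 64kB packet
 * is 32 frames of 2kB. A device accepting at least 32 segments can take one
 * segment per frame; one accepting at least 16 segments (one per 4kB page)
 * works with PACKET_MERGE_SEGS set, at the cost of an extra copy on writes;
 * anything smaller cannot be supported.
 */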
static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);

	if (bio->bi_error)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}
static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
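/*
 * Illustrative example (not in the original source): if userspace wrote only
 * frames 3..17 of a 32-frame zone, pkt_gather_data() issues reads for frames
 * 0..2 and 18..31 so that the later pkt_start_write() can emit one full,
 * aligned 32-frame packet.
 */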
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
	pkt->bio->bi_iter.bi_sector = new_sector;
	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	struct pktcdvd_device *pd = pkt->pd;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int error)
{
	struct bio *bio;

	if (error)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_error = error;
		bio_endio(bio);
	}
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_error) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				pkt_dbg(2, pd, "No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_error);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another packet.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
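/*
 * Unit note (illustrative, not in the original source): pd->settings.size is
 * kept in 512-byte sectors, so fixed_packet_size (reported in 2kB frames) is
 * shifted left by 2; a 16-frame packet becomes size = 64 sectors = 32kB, and
 * the frame count is recovered elsewhere as size >> 2.
 */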
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
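/*
 * Note (illustrative, not in the original source): the ATIP A1 field encodes
 * a 4-bit CLV speed index whose meaning depends on the disc sub-type; index 3
 * means 6x for standard and high speed CD-RW but 8x for ultra high speed
 * media, which is why three separate lookup tables are needed.
 */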
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pkt_notice(pd, "unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed) {
		pkt_info(pd, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pkt_info(pd, "%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	psd->bio->bi_error = bio->bi_error;
	bio_put(bio);
	bio_endio(psd->bio);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
	struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

	psd->pd = pd;
	psd->bio = bio;
	cloned_bio->bi_bdev = pd->bdev;
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
}
static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	zone = get_zone(bio->bi_iter.bi_sector, pd);

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(BLK_RW_ASYNC, HZ);
			spin_lock(&pd->lock);
		} while (pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
}

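/*
 * The on/off marks above form a hysteresis band: once a writer hits the
 * "on" mark it sleeps until the queue has drained below the *off* mark,
 * not merely back under "on", so writers do not bounce in and out of the
 * congested state on every completed packet. With the module's default
 * marks (assumed here: on=10000, off=9000 queued frames), a writer that
 * blocks at 10000 resumes only once fewer than 9000 remain.
 */
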
static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	struct bio *split;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio, q->bio_split);

	pd = q->queuedata;
	if (!pd) {
		pr_err("%s incorrect request queue\n",
		       bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_iter.bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		pkt_make_request_read(pd, bio);
		return BLK_QC_T_NONE;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		pkt_notice(pd, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		pkt_err(pd, "wrong bio size\n");
		goto end_io;
	}

	do {
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		pkt_make_request_write(q, split);
	} while (split != bio);

	return BLK_QC_T_NONE;
end_io:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

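/*
 * Worked example of the split loop, assuming 64kB fixed packets
 * (pd->settings.size == 128 sectors) and a zero offset, so that
 * get_zone(s, pd) == s & ~127: a WRITE covering sectors 100..139 spans
 * zones 0 and 128, so bio_split() carves off sectors 100..127, bio_chain()
 * ties the fragment's completion to the parent, and the next iteration
 * sees the parent starting at sector 128, which no longer crosses a
 * boundary, ending the loop with split == bio.
 */
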
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	q->queuedata = pd;
}

static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}

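/*
 * For reference, the output read back from /proc/driver/pktcdvd/pktcdvd0
 * starts roughly like this (values illustrative):
 *
 *	Writer pktcdvd0 mapped to sr0:
 *
 *	Settings:
 *		packet size:		64kB
 *		write type:		Packet
 *		packet type:		Fixed
 *		...
 */
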
static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE_DATA(inode));
}

static const struct file_operations pkt_proc_fops = {
	.open		= pkt_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release
};

static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%s already setup\n",
				bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}

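/*
 * The __module_get()/module_put() pair above pins the module for the
 * lifetime of the mapping, which outlives the open() of the control device
 * that created it; the existing open() reference is what makes taking and
 * dropping that extra reference race-free, as the comments above note.
 */
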
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;

	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}

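/*
 * Example path through the switch above: "eject /dev/pktcdvd0" issues
 * CDROMEJECT; if the ejecting process is the only opener (refcnt == 1),
 * the door is unlocked first, then the command falls through and is
 * forwarded to the underlying drive via __blkdev_driver_ioctl().
 */
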
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}

static const struct block_device_operations pktcdvd_ops = {
	.owner		= THIS_MODULE,
	.open		= pkt_open,
	.release	= pkt_close,
	.ioctl		= pkt_ioctl,
	.check_events	= pkt_check_events,
};

static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}

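/*
 * With this hook, devtmpfs/udev place the block node in a subdirectory,
 * e.g. /dev/pktcdvd/pktcdvd0 for the first writer, alongside the
 * pktcdvd/control misc node registered below.
 */
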
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;
	disk->async_events = pd->bdev->bd_disk->async_events;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}

/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}

	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}

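/*
 * The teardown order above is deliberate: the worker thread is stopped and
 * the pkt_devs[] slot cleared before the host bdev reference is dropped,
 * so no new I/O can reach a half-dismantled mapping; the gendisk, queue
 * and mempool go last, and only then is the module reference taken in
 * pkt_new_dev() returned.
 */
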
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}

static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};

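/*
 * Illustrative user-space setup through the control node (a sketch of what
 * pktsetup(8) does; error handling omitted; the encoding shown assumes a
 * minor below 256, where new_encode_dev() reduces to (major << 8) | minor):
 *
 *	int fd = open("/dev/pktcdvd/control", O_RDWR);
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev	 = (11 << 8) | 0,	// sr0 is block dev 11:0
 *	};
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *	// on success, c.pkt_dev encodes the new pktcdvd device node
 */
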
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}

static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);