/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * such as drivers/scsi/sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/pktcdvd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>

#include <linux/unaligned.h>

#define DRIVER_NAME	"pktcdvd"

#define MAX_SPEED	0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

/* /sys/class/pktcdvd */
static struct class class_pktcdvd;
static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
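/*
 * Illustrative note (not from the original source): pd->settings.size is the
 * fixed packet size in 512-byte sectors and is assumed to be a power of two,
 * so get_zone() simply rounds the sector down to a packet boundary after
 * correcting for the track start offset.  E.g. with a typical 32kB packet
 * (settings.size == 64) and pd->offset == 0:
 *
 *	get_zone(1000, pd) == (1000 & ~63) == 960
 *	get_zone(1024, pd) == 1024
 *
 * so sectors 960..1023 all map to the same packet-sized zone.
 */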
/**********************************************************
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 *  /sys/class/pktcdvd/pktcdvd[0-7]/
 *     stat/reset
 *     stat/packets_started
 *     stat/packets_finished
 *     stat/kb_written
 *     stat/kb_read
 *     stat/kb_read_gather
 *     write_queue/size
 *     write_queue/congestion_off
 *     write_queue/congestion_on
 **********************************************************/
static ssize_t packets_started_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
}
static DEVICE_ATTR_RO(packets_started);

static ssize_t packets_finished_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
}
static DEVICE_ATTR_RO(packets_finished);

static ssize_t kb_written_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
}
static DEVICE_ATTR_RO(kb_written);

static ssize_t kb_read_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
}
static DEVICE_ATTR_RO(kb_read);

static ssize_t kb_read_gather_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
}
static DEVICE_ATTR_RO(kb_read_gather);

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	if (len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;
	}
	return len;
}
static DEVICE_ATTR_WO(reset);
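/*
 * Usage sketch (illustrative, not part of the driver): a non-empty write to
 * the "reset" attribute clears the counters shown above, e.g. from user
 * space for the first writer:
 *
 *	echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 */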
static struct attribute *pkt_stat_attrs[] = {
	&dev_attr_packets_finished.attr,
	&dev_attr_packets_started.attr,
	&dev_attr_kb_read.attr,
	&dev_attr_kb_written.attr,
	&dev_attr_kb_read_gather.attr,
	&dev_attr_reset.attr,
	NULL,
};

static const struct attribute_group pkt_stat_group = {
	.name = "stat",
	.attrs = pkt_stat_attrs,
};

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
	spin_unlock(&pd->lock);
	return n;
}
static DEVICE_ATTR_RO(size);

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
static ssize_t congestion_off_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_off_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	spin_lock(&pd->lock);
	pd->write_congestion_off = val;
	init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return len;
}
static DEVICE_ATTR_RW(congestion_off);

static ssize_t congestion_on_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_on_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	spin_lock(&pd->lock);
	pd->write_congestion_on = val;
	init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return len;
}
static DEVICE_ATTR_RW(congestion_on);

static struct attribute *pkt_wq_attrs[] = {
	&dev_attr_congestion_on.attr,
	&dev_attr_congestion_off.attr,
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group pkt_wq_group = {
	.name = "write_queue",
	.attrs = pkt_wq_attrs,
};

static const struct attribute_group *pkt_groups[] = {
	&pkt_stat_group,
	&pkt_wq_group,
	NULL,
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_is_registered(&class_pktcdvd)) {
		pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
						    MKDEV(0, 0), pd, pkt_groups,
						    "%s", pd->disk->disk_name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	if (class_is_registered(&class_pktcdvd))
		device_unregister(pd->dev);
}
/********************************************************************
  /sys/class/pktcdvd/
                  add            map block device
                  remove         unmap packet dev
                  device_map     show mappings
 *******************************************************************/

static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];

		if (!pd)
			continue;
		n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
			pd->disk->disk_name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(file_bdev(pd->bdev_file)->bd_dev),
			MINOR(file_bdev(pd->bdev_file)->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);

static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);

static struct class class_pktcdvd = {
	.name		= DRIVER_NAME,
	.class_groups	= class_pktcdvd_groups,
};

static int pkt_sysfs_init(void)
{
	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	return class_register(&class_pktcdvd);
}

static void pkt_sysfs_cleanup(void)
{
	class_unregister(&class_pktcdvd);
}
/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
		info

 *******************************************************************/

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
		   file_bdev(pd->bdev_file));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pkt_seq);

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);

	pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
					     pd, &pkt_seq_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);

	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		dev_dbg(ddev, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
		if (!pkt->r_bios[i])
			goto no_rd_bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++)
		kfree(pkt->r_bios[i]);
no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++)
		kfree(pkt->r_bios[i]);
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
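/*
 * Sizing note (illustrative, not from the original source): assuming
 * FRAMES_PER_PAGE is PAGE_SIZE / CD_FRAMESIZE, a 4kB page holds two 2kB
 * frames, so a 32kB packet (pd->settings.size >> 2 == 16 frames) needs
 * 16 / 2 == 8 backing pages plus one pre-allocated read bio per frame.
 */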
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);

	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
	struct scsi_cmnd *scmd;
	struct request *rq;
	int ret = 0;

	rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scmd = blk_mq_rq_to_pdu(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(rq, false);
	if (scmd->result)
		ret = -EIO;
out:
	blk_mq_free_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	put_unaligned_be16(read_speed, &cgc.cmd[2]);
	put_unaligned_be16(write_speed, &cgc.cmd[4]);

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	/*
	 * Some CDRW drives can not handle writes larger than one packet,
	 * even if the size is a multiple of the packet size.
	 */
	bio->bi_opf |= REQ_NOMERGE;

	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					dev_dbg(ddev, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					dev_dbg(ddev, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	struct device *ddev = disk_to_dev(pd->disk);

	if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	}

	if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	}

	dev_err(ddev, "cdrom max_phys_segments too small\n");
	return -EIO;
}
static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	bio_uninit(bio);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	bio_uninit(bio);
	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
			 REQ_OP_READ);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
			pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
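/*
 * Worked example (illustrative, not from the original source): CD_FRAMESIZE
 * is 2048 bytes, i.e. CD_FRAMESIZE >> 9 == 4 sectors per frame.  A 4kB bio
 * starting 8 sectors into its zone therefore marks frames 2 and 3 as
 * already written:
 *
 *	first_frame = 8 / 4 = 2
 *	num_frames  = 4096 / 2048 = 2
 *
 * and only the remaining frames of the packet are read-gathered above.
 */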
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
				 enum packet_data_state state)
{
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;

	dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, pkt->sector, state_name[old_state], state_name[state]);

	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		dev_dbg(ddev, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		dev_dbg(ddev, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	dev_dbg(ddev, "looking for zone %llx\n", zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);

		bio = node->bio;
		dev_dbg(ddev, "found zone=%llx\n", tmp);
		if (tmp != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	 * below, wake up any waiters
	 */
	if (pd->congested &&
	    pd->bio_queue_size <= pd->write_congestion_off) {
		pd->congested = false;
		wake_up_var(&pd->congested);
	}
	spin_unlock(&pd->lock);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
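/*
 * Note on the congestion marks checked above (illustrative example, values
 * not taken from the original source): with marks of on=1000 and off=900,
 * pkt_make_request_write() starts throttling writers once 1000 bios are
 * queued, and the wake_up_var() here only fires after the queue has drained
 * below 900, giving simple hysteresis instead of waking and re-blocking a
 * writer on every completed packet.
 */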
/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int f;

	bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
		 pkt->frames, REQ_OP_WRITE);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct device *ddev = disk_to_dev(pd->disk);

	dev_dbg(ddev, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			dev_dbg(ddev, "No recovery possible\n");
			pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_data *pkt;
	int states[PACKET_NUM_STATES];
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			pkt_count_states(pd, states);
			dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
				states[0], states[1], states[2], states[3], states[4], states[5]);

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			dev_dbg(ddev, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			dev_dbg(ddev, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
1505 static void pkt_print_settings(struct pktcdvd_device
*pd
)
1507 dev_info(disk_to_dev(pd
->disk
), "%s packets, %u blocks, Mode-%c disc\n",
1508 pd
->settings
.fp
? "Fixed" : "Variable",
1509 pd
->settings
.size
>> 2,
1510 pd
->settings
.block_mode
== 8 ? '1' : '2');
1513 static int pkt_mode_sense(struct pktcdvd_device
*pd
, struct packet_command
*cgc
, int page_code
, int page_control
)
1515 memset(cgc
->cmd
, 0, sizeof(cgc
->cmd
));
1517 cgc
->cmd
[0] = GPCMD_MODE_SENSE_10
;
1518 cgc
->cmd
[2] = page_code
| (page_control
<< 6);
1519 put_unaligned_be16(cgc
->buflen
, &cgc
->cmd
[7]);
1520 cgc
->data_direction
= CGC_DATA_READ
;
1521 return pkt_generic_packet(pd
, cgc
);
1524 static int pkt_mode_select(struct pktcdvd_device
*pd
, struct packet_command
*cgc
)
1526 memset(cgc
->cmd
, 0, sizeof(cgc
->cmd
));
1527 memset(cgc
->buffer
, 0, 2);
1528 cgc
->cmd
[0] = GPCMD_MODE_SELECT_10
;
1529 cgc
->cmd
[1] = 0x10; /* PF */
1530 put_unaligned_be16(cgc
->buflen
, &cgc
->cmd
[7]);
1531 cgc
->data_direction
= CGC_DATA_WRITE
;
1532 return pkt_generic_packet(pd
, cgc
);
1535 static int pkt_get_disc_info(struct pktcdvd_device
*pd
, disc_information
*di
)
1537 struct packet_command cgc
;
1540 /* set up command and get the disc info */
1541 init_cdrom_command(&cgc
, di
, sizeof(*di
), CGC_DATA_READ
);
1542 cgc
.cmd
[0] = GPCMD_READ_DISC_INFO
;
1543 cgc
.cmd
[8] = cgc
.buflen
= 2;
1546 ret
= pkt_generic_packet(pd
, &cgc
);
1550 /* not all drives have the same disc_info length, so requeue
1551 * packet with the length the drive tells us it can supply
1553 cgc
.buflen
= be16_to_cpu(di
->disc_information_length
) +
1554 sizeof(di
->disc_information_length
);
1556 if (cgc
.buflen
> sizeof(disc_information
))
1557 cgc
.buflen
= sizeof(disc_information
);
1559 cgc
.cmd
[8] = cgc
.buflen
;
1560 return pkt_generic_packet(pd
, &cgc
);
1563 static int pkt_get_track_info(struct pktcdvd_device
*pd
, __u16 track
, __u8 type
, track_information
*ti
)
1565 struct packet_command cgc
;
1568 init_cdrom_command(&cgc
, ti
, 8, CGC_DATA_READ
);
1569 cgc
.cmd
[0] = GPCMD_READ_TRACK_RZONE_INFO
;
1570 cgc
.cmd
[1] = type
& 3;
1571 put_unaligned_be16(track
, &cgc
.cmd
[4]);
1575 ret
= pkt_generic_packet(pd
, &cgc
);
1579 cgc
.buflen
= be16_to_cpu(ti
->track_information_length
) +
1580 sizeof(ti
->track_information_length
);
1582 if (cgc
.buflen
> sizeof(track_information
))
1583 cgc
.buflen
= sizeof(track_information
);
1585 cgc
.cmd
[8] = cgc
.buflen
;
1586 return pkt_generic_packet(pd
, &cgc
);
1589 static noinline_for_stack
int pkt_get_last_written(struct pktcdvd_device
*pd
,
1592 disc_information di
;
1593 track_information ti
;
1597 ret
= pkt_get_disc_info(pd
, &di
);
1601 last_track
= (di
.last_track_msb
<< 8) | di
.last_track_lsb
;
1602 ret
= pkt_get_track_info(pd
, last_track
, 1, &ti
);
1606 /* if this track is blank, try the previous. */
1609 ret
= pkt_get_track_info(pd
, last_track
, 1, &ti
);
1614 /* if last recorded field is valid, return it. */
1616 *last_written
= be32_to_cpu(ti
.last_rec_address
);
1618 /* make it up instead */
1619 *last_written
= be32_to_cpu(ti
.track_start
) +
1620 be32_to_cpu(ti
.track_size
);
1622 *last_written
-= (be32_to_cpu(ti
.free_blocks
) + 7);
1628 * write mode select package based on pd->settings
1630 static noinline_for_stack
int pkt_set_write_settings(struct pktcdvd_device
*pd
)
1632 struct device
*ddev
= disk_to_dev(pd
->disk
);
1633 struct packet_command cgc
;
1634 struct scsi_sense_hdr sshdr
;
1635 write_param_page
*wp
;
1639 /* doesn't apply to DVD+RW or DVD-RAM */
1640 if ((pd
->mmc3_profile
== 0x1a) || (pd
->mmc3_profile
== 0x12))
1643 memset(buffer
, 0, sizeof(buffer
));
1644 init_cdrom_command(&cgc
, buffer
, sizeof(*wp
), CGC_DATA_READ
);
1646 ret
= pkt_mode_sense(pd
, &cgc
, GPMODE_WRITE_PARMS_PAGE
, 0);
1648 pkt_dump_sense(pd
, &cgc
);
1652 size
= 2 + get_unaligned_be16(&buffer
[0]);
1653 pd
->mode_offset
= get_unaligned_be16(&buffer
[6]);
1654 if (size
> sizeof(buffer
))
1655 size
= sizeof(buffer
);
1660 init_cdrom_command(&cgc
, buffer
, size
, CGC_DATA_READ
);
1662 ret
= pkt_mode_sense(pd
, &cgc
, GPMODE_WRITE_PARMS_PAGE
, 0);
1664 pkt_dump_sense(pd
, &cgc
);
1669 * write page is offset header + block descriptor length
1671 wp
= (write_param_page
*) &buffer
[sizeof(struct mode_page_header
) + pd
->mode_offset
];
1673 wp
->fp
= pd
->settings
.fp
;
1674 wp
->track_mode
= pd
->settings
.track_mode
;
1675 wp
->write_type
= pd
->settings
.write_type
;
1676 wp
->data_block_type
= pd
->settings
.block_mode
;
1678 wp
->multi_session
= 0;
1680 #ifdef PACKET_USE_LS
1685 if (wp
->data_block_type
== PACKET_BLOCK_MODE1
) {
1686 wp
->session_format
= 0;
1688 } else if (wp
->data_block_type
== PACKET_BLOCK_MODE2
) {
1689 wp
->session_format
= 0x20;
1693 memcpy(&wp
->mcn
[1], PACKET_MCN
, sizeof(wp
->mcn
) - 1);
1699 dev_err(ddev
, "write mode wrong %d\n", wp
->data_block_type
);
1702 wp
->packet_size
= cpu_to_be32(pd
->settings
.size
>> 2);
1704 cgc
.buflen
= cgc
.cmd
[8] = size
;
1705 ret
= pkt_mode_select(pd
, &cgc
);
1707 pkt_dump_sense(pd
, &cgc
);
1711 pkt_print_settings(pd
);
1716 * 1 -- we can write to this track, 0 -- we can't
1718 static int pkt_writable_track(struct pktcdvd_device
*pd
, track_information
*ti
)
1720 struct device
*ddev
= disk_to_dev(pd
->disk
);
1722 switch (pd
->mmc3_profile
) {
1723 case 0x1a: /* DVD+RW */
1724 case 0x12: /* DVD-RAM */
1725 /* The track is always writable on DVD+RW/DVD-RAM */
1731 if (!ti
->packet
|| !ti
->fp
)
1735 * "good" settings as per Mt Fuji.
1737 if (ti
->rt
== 0 && ti
->blank
== 0)
1740 if (ti
->rt
== 0 && ti
->blank
== 1)
1743 if (ti
->rt
== 1 && ti
->blank
== 0)
1746 dev_err(ddev
, "bad state %d-%d-%d\n", ti
->rt
, ti
->blank
, ti
->packet
);
1751 * 1 -- we can write to this disc, 0 -- we can't
1753 static int pkt_writable_disc(struct pktcdvd_device
*pd
, disc_information
*di
)
1755 struct device
*ddev
= disk_to_dev(pd
->disk
);
1757 switch (pd
->mmc3_profile
) {
1758 case 0x0a: /* CD-RW */
1759 case 0xffff: /* MMC3 not supported */
1761 case 0x1a: /* DVD+RW */
1762 case 0x13: /* DVD-RW */
1763 case 0x12: /* DVD-RAM */
1766 dev_dbg(ddev
, "Wrong disc profile (%x)\n", pd
->mmc3_profile
);
1771 * for disc type 0xff we should probably reserve a new track.
1772 * but i'm not sure, should we leave this to user apps? probably.
1774 if (di
->disc_type
== 0xff) {
1775 dev_notice(ddev
, "unknown disc - no track?\n");
1779 if (di
->disc_type
!= 0x20 && di
->disc_type
!= 0) {
1780 dev_err(ddev
, "wrong disc type (%x)\n", di
->disc_type
);
1784 if (di
->erasable
== 0) {
1785 dev_err(ddev
, "disc not erasable\n");
1789 if (di
->border_status
== PACKET_SESSION_RESERVED
) {
1790 dev_err(ddev
, "can't write to last track (reserved)\n");
1797 static noinline_for_stack
int pkt_probe_settings(struct pktcdvd_device
*pd
)
1799 struct device
*ddev
= disk_to_dev(pd
->disk
);
1800 struct packet_command cgc
;
1801 unsigned char buf
[12];
1802 disc_information di
;
1803 track_information ti
;
1806 init_cdrom_command(&cgc
, buf
, sizeof(buf
), CGC_DATA_READ
);
1807 cgc
.cmd
[0] = GPCMD_GET_CONFIGURATION
;
1809 ret
= pkt_generic_packet(pd
, &cgc
);
1810 pd
->mmc3_profile
= ret
? 0xffff : get_unaligned_be16(&buf
[6]);
1812 memset(&di
, 0, sizeof(disc_information
));
1813 memset(&ti
, 0, sizeof(track_information
));
1815 ret
= pkt_get_disc_info(pd
, &di
);
1817 dev_err(ddev
, "failed get_disc\n");
1821 if (!pkt_writable_disc(pd
, &di
))
1824 pd
->type
= di
.erasable
? PACKET_CDRW
: PACKET_CDR
;
1826 track
= 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1827 ret
= pkt_get_track_info(pd
, track
, 1, &ti
);
1829 dev_err(ddev
, "failed get_track\n");
1833 if (!pkt_writable_track(pd
, &ti
)) {
1834 dev_err(ddev
, "can't write to this track\n");
1839 * we keep packet size in 512 byte units, makes it easier to
1840 * deal with request calculations.
1842 pd
->settings
.size
= be32_to_cpu(ti
.fixed_packet_size
) << 2;
1843 if (pd
->settings
.size
== 0) {
1844 dev_notice(ddev
, "detected zero packet size!\n");
1847 if (pd
->settings
.size
> PACKET_MAX_SECTORS
) {
1848 dev_err(ddev
, "packet size is too big\n");
1851 pd
->settings
.fp
= ti
.fp
;
1852 pd
->offset
= (be32_to_cpu(ti
.track_start
) << 2) & (pd
->settings
.size
- 1);
1855 pd
->nwa
= be32_to_cpu(ti
.next_writable
);
1856 set_bit(PACKET_NWA_VALID
, &pd
->flags
);
1860 * in theory we could use lra on -RW media as well and just zero
1861 * blocks that haven't been written yet, but in practice that
1862 * is just a no-go. we'll use that for -R, naturally.
1865 pd
->lra
= be32_to_cpu(ti
.last_rec_address
);
1866 set_bit(PACKET_LRA_VALID
, &pd
->flags
);
1868 pd
->lra
= 0xffffffff;
1869 set_bit(PACKET_LRA_VALID
, &pd
->flags
);
1875 pd
->settings
.link_loss
= 7;
1876 pd
->settings
.write_type
= 0; /* packet */
1877 pd
->settings
.track_mode
= ti
.track_mode
;
1880 * mode1 or mode2 disc
1882 switch (ti
.data_mode
) {
1884 pd
->settings
.block_mode
= PACKET_BLOCK_MODE1
;
1887 pd
->settings
.block_mode
= PACKET_BLOCK_MODE2
;
1890 dev_err(ddev
, "unknown data mode\n");
1897 * enable/disable write caching on drive
1899 static noinline_for_stack
int pkt_write_caching(struct pktcdvd_device
*pd
)
1901 struct device
*ddev
= disk_to_dev(pd
->disk
);
1902 struct packet_command cgc
;
1903 struct scsi_sense_hdr sshdr
;
1904 unsigned char buf
[64];
1905 bool set
= IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE
);
1908 init_cdrom_command(&cgc
, buf
, sizeof(buf
), CGC_DATA_READ
);
1910 cgc
.buflen
= pd
->mode_offset
+ 12;
1913 * caching mode page might not be there, so quiet this command
1917 ret
= pkt_mode_sense(pd
, &cgc
, GPMODE_WCACHING_PAGE
, 0);
1922 * use drive write caching -- we need deferred error handling to be
1923 * able to successfully recover with this option (drive will return good
1924 * status as soon as the cdb is validated).
1926 buf
[pd
->mode_offset
+ 10] |= (set
<< 2);
1928 cgc
.buflen
= cgc
.cmd
[8] = 2 + get_unaligned_be16(&buf
[0]);
1929 ret
= pkt_mode_select(pd
, &cgc
);
1931 dev_err(ddev
, "write caching control failed\n");
1932 pkt_dump_sense(pd
, &cgc
);
1933 } else if (!ret
&& set
)
1934 dev_notice(ddev
, "enabled write caching\n");
1938 static int pkt_lock_door(struct pktcdvd_device
*pd
, int lockflag
)
1940 struct packet_command cgc
;
1942 init_cdrom_command(&cgc
, NULL
, 0, CGC_DATA_NONE
);
1943 cgc
.cmd
[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL
;
1944 cgc
.cmd
[4] = lockflag
? 1 : 0;
1945 return pkt_generic_packet(pd
, &cgc
);
1949 * Returns drive maximum write speed
1951 static noinline_for_stack
int pkt_get_max_speed(struct pktcdvd_device
*pd
,
1952 unsigned *write_speed
)
1954 struct packet_command cgc
;
1955 struct scsi_sense_hdr sshdr
;
1956 unsigned char buf
[256+18];
1957 unsigned char *cap_buf
;
1960 cap_buf
= &buf
[sizeof(struct mode_page_header
) + pd
->mode_offset
];
1961 init_cdrom_command(&cgc
, buf
, sizeof(buf
), CGC_DATA_UNKNOWN
);
1964 ret
= pkt_mode_sense(pd
, &cgc
, GPMODE_CAPABILITIES_PAGE
, 0);
1966 cgc
.buflen
= pd
->mode_offset
+ cap_buf
[1] + 2 +
1967 sizeof(struct mode_page_header
);
1968 ret
= pkt_mode_sense(pd
, &cgc
, GPMODE_CAPABILITIES_PAGE
, 0);
1970 pkt_dump_sense(pd
, &cgc
);
1975 offset
= 20; /* Obsoleted field, used by older drives */
1976 if (cap_buf
[1] >= 28)
1977 offset
= 28; /* Current write speed selected */
1978 if (cap_buf
[1] >= 30) {
1979 /* If the drive reports at least one "Logical Unit Write
1980 * Speed Performance Descriptor Block", use the information
1981 * in the first block. (contains the highest speed)
1983 int num_spdb
= get_unaligned_be16(&cap_buf
[30]);
1988 *write_speed
= get_unaligned_be16(&cap_buf
[offset
]);
1992 /* These tables from cdrecord - I don't have orange book */
1993 /* standard speed CD-RW (1-4x) */
1994 static char clv_to_speed
[16] = {
1995 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1996 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1998 /* high speed CD-RW (-10x) */
1999 static char hs_clv_to_speed
[16] = {
2000 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
2001 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2003 /* ultra high speed CD-RW */
2004 static char us_clv_to_speed
[16] = {
2005 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
2006 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
2010 * reads the maximum media speed from ATIP
2012 static noinline_for_stack
int pkt_media_speed(struct pktcdvd_device
*pd
,
2015 struct device
*ddev
= disk_to_dev(pd
->disk
);
2016 struct packet_command cgc
;
2017 struct scsi_sense_hdr sshdr
;
2018 unsigned char buf
[64];
2019 unsigned int size
, st
, sp
;
2022 init_cdrom_command(&cgc
, buf
, 2, CGC_DATA_READ
);
2024 cgc
.cmd
[0] = GPCMD_READ_TOC_PMA_ATIP
;
2026 cgc
.cmd
[2] = 4; /* READ ATIP */
2028 ret
= pkt_generic_packet(pd
, &cgc
);
2030 pkt_dump_sense(pd
, &cgc
);
2033 size
= 2 + get_unaligned_be16(&buf
[0]);
2034 if (size
> sizeof(buf
))
2037 init_cdrom_command(&cgc
, buf
, size
, CGC_DATA_READ
);
2039 cgc
.cmd
[0] = GPCMD_READ_TOC_PMA_ATIP
;
2043 ret
= pkt_generic_packet(pd
, &cgc
);
2045 pkt_dump_sense(pd
, &cgc
);
2049 if (!(buf
[6] & 0x40)) {
2050 dev_notice(ddev
, "disc type is not CD-RW\n");
2053 if (!(buf
[6] & 0x4)) {
2054 dev_notice(ddev
, "A1 values on media are not valid, maybe not CDRW?\n");
2058 st
= (buf
[6] >> 3) & 0x7; /* disc sub-type */
2060 sp
= buf
[16] & 0xf; /* max speed from ATIP A1 field */
2062 /* Info from cdrecord */
2064 case 0: /* standard speed */
2065 *speed
= clv_to_speed
[sp
];
2067 case 1: /* high speed */
2068 *speed
= hs_clv_to_speed
[sp
];
2070 case 2: /* ultra high speed */
2071 *speed
= us_clv_to_speed
[sp
];
2074 dev_notice(ddev
, "unknown disc sub-type %d\n", st
);
2078 dev_info(ddev
, "maximum media speed: %d\n", *speed
);
2081 dev_notice(ddev
, "unknown speed %d for sub-type %d\n", sp
, st
);
2086 static noinline_for_stack
int pkt_perform_opc(struct pktcdvd_device
*pd
)
2088 struct device
*ddev
= disk_to_dev(pd
->disk
);
2089 struct packet_command cgc
;
2090 struct scsi_sense_hdr sshdr
;
2093 dev_dbg(ddev
, "Performing OPC\n");
2095 init_cdrom_command(&cgc
, NULL
, 0, CGC_DATA_NONE
);
2097 cgc
.timeout
= 60*HZ
;
2098 cgc
.cmd
[0] = GPCMD_SEND_OPC
;
2100 ret
= pkt_generic_packet(pd
, &cgc
);
2102 pkt_dump_sense(pd
, &cgc
);
2106 static int pkt_open_write(struct pktcdvd_device
*pd
)
2108 struct device
*ddev
= disk_to_dev(pd
->disk
);
2110 unsigned int write_speed
, media_write_speed
, read_speed
;
2112 ret
= pkt_probe_settings(pd
);
2114 dev_dbg(ddev
, "failed probe\n");
2118 ret
= pkt_set_write_settings(pd
);
2120 dev_notice(ddev
, "failed saving write settings\n");
2124 pkt_write_caching(pd
);
2126 ret
= pkt_get_max_speed(pd
, &write_speed
);
2128 write_speed
= 16 * 177;
2129 switch (pd
->mmc3_profile
) {
2130 case 0x13: /* DVD-RW */
2131 case 0x1a: /* DVD+RW */
2132 case 0x12: /* DVD-RAM */
2133 dev_notice(ddev
, "write speed %ukB/s\n", write_speed
);
2136 ret
= pkt_media_speed(pd
, &media_write_speed
);
2138 media_write_speed
= 16;
2139 write_speed
= min(write_speed
, media_write_speed
* 177);
2140 dev_notice(ddev
, "write speed %ux\n", write_speed
/ 176);
2143 read_speed
= write_speed
;
2145 ret
= pkt_set_speed(pd
, write_speed
, read_speed
);
2147 dev_notice(ddev
, "couldn't set write speed\n");
2150 pd
->write_speed
= write_speed
;
2151 pd
->read_speed
= read_speed
;
2153 ret
= pkt_perform_opc(pd
);
2155 dev_notice(ddev
, "Optimum Power Calibration failed\n");
2161 * called at open time.
2163 static int pkt_open_dev(struct pktcdvd_device
*pd
, bool write
)
2165 struct device
*ddev
= disk_to_dev(pd
->disk
);
2168 struct request_queue
*q
;
2169 struct file
*bdev_file
;
2172 * We need to re-open the cdrom device without O_NONBLOCK to be able
2173 * to read/write from/to it. It is already opened in O_NONBLOCK mode
2174 * so open should not fail.
2176 bdev_file
= bdev_file_open_by_dev(file_bdev(pd
->bdev_file
)->bd_dev
,
2177 BLK_OPEN_READ
, pd
, NULL
);
2178 if (IS_ERR(bdev_file
)) {
2179 ret
= PTR_ERR(bdev_file
);
2182 pd
->f_open_bdev
= bdev_file
;
2184 ret
= pkt_get_last_written(pd
, &lba
);
2186 dev_err(ddev
, "pkt_get_last_written failed\n");
2190 set_capacity(pd
->disk
, lba
<< 2);
2191 set_capacity_and_notify(file_bdev(pd
->bdev_file
)->bd_disk
, lba
<< 2);
2193 q
= bdev_get_queue(file_bdev(pd
->bdev_file
));
2195 ret
= pkt_open_write(pd
);
2198 set_bit(PACKET_WRITABLE
, &pd
->flags
);
2200 pkt_set_speed(pd
, MAX_SPEED
, MAX_SPEED
);
2201 clear_bit(PACKET_WRITABLE
, &pd
->flags
);
2204 ret
= pkt_set_segment_merging(pd
, q
);
2209 if (!pkt_grow_pktlist(pd
, CONFIG_CDROM_PKTCDVD_BUFFERS
)) {
2210 dev_err(ddev
, "not enough memory for buffers\n");
2214 dev_info(ddev
, "%lukB available on disc\n", lba
<< 1);
2216 set_blocksize(bdev_file
, CD_FRAMESIZE
);
2227 * called when the device is closed. makes sure that the device flushes
2228 * the internal cache before we close.
2230 static void pkt_release_dev(struct pktcdvd_device
*pd
, int flush
)
2232 struct device
*ddev
= disk_to_dev(pd
->disk
);
2234 if (flush
&& pkt_flush_cache(pd
))
2235 dev_notice(ddev
, "not flushing cache\n");
2237 pkt_lock_door(pd
, 0);
2239 pkt_set_speed(pd
, MAX_SPEED
, MAX_SPEED
);
2240 fput(pd
->f_open_bdev
);
2241 pd
->f_open_bdev
= NULL
;
2243 pkt_shrink_pktlist(pd
);
2246 static struct pktcdvd_device
*pkt_find_dev_from_minor(unsigned int dev_minor
)
2248 if (dev_minor
>= MAX_WRITERS
)
2251 dev_minor
= array_index_nospec(dev_minor
, MAX_WRITERS
);
2252 return pkt_devs
[dev_minor
];
2255 static int pkt_open(struct gendisk
*disk
, blk_mode_t mode
)
2257 struct pktcdvd_device
*pd
= NULL
;
2260 mutex_lock(&pktcdvd_mutex
);
2261 mutex_lock(&ctl_mutex
);
2262 pd
= pkt_find_dev_from_minor(disk
->first_minor
);
2267 BUG_ON(pd
->refcnt
< 0);
2270 if (pd
->refcnt
> 1) {
2271 if ((mode
& BLK_OPEN_WRITE
) &&
2272 !test_bit(PACKET_WRITABLE
, &pd
->flags
)) {
2277 ret
= pkt_open_dev(pd
, mode
& BLK_OPEN_WRITE
);
2281 mutex_unlock(&ctl_mutex
);
2282 mutex_unlock(&pktcdvd_mutex
);
2288 mutex_unlock(&ctl_mutex
);
2289 mutex_unlock(&pktcdvd_mutex
);
2293 static void pkt_release(struct gendisk
*disk
)
2295 struct pktcdvd_device
*pd
= disk
->private_data
;
2297 mutex_lock(&pktcdvd_mutex
);
2298 mutex_lock(&ctl_mutex
);
2300 BUG_ON(pd
->refcnt
< 0);
2301 if (pd
->refcnt
== 0) {
2302 int flush
= test_bit(PACKET_WRITABLE
, &pd
->flags
);
2303 pkt_release_dev(pd
, flush
);
2305 mutex_unlock(&ctl_mutex
);
2306 mutex_unlock(&pktcdvd_mutex
);
2310 static void pkt_end_io_read_cloned(struct bio
*bio
)
2312 struct packet_stacked_data
*psd
= bio
->bi_private
;
2313 struct pktcdvd_device
*pd
= psd
->pd
;
2315 psd
->bio
->bi_status
= bio
->bi_status
;
2317 bio_endio(psd
->bio
);
2318 mempool_free(psd
, &psd_pool
);
2319 pkt_bio_finished(pd
);
2322 static void pkt_make_request_read(struct pktcdvd_device
*pd
, struct bio
*bio
)
2324 struct bio
*cloned_bio
= bio_alloc_clone(file_bdev(pd
->bdev_file
), bio
,
2325 GFP_NOIO
, &pkt_bio_set
);
2326 struct packet_stacked_data
*psd
= mempool_alloc(&psd_pool
, GFP_NOIO
);
2330 cloned_bio
->bi_private
= psd
;
2331 cloned_bio
->bi_end_io
= pkt_end_io_read_cloned
;
2332 pd
->stats
.secs_r
+= bio_sectors(bio
);
2333 pkt_queue_bio(pd
, cloned_bio
);
static void pkt_make_request_write(struct bio *bio)
{
	struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	zone = get_zone(bio->bi_iter.bi_sector, pd);

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		struct wait_bit_queue_entry wqe;

		init_wait_var_entry(&wqe, &pd->congested, 0);
		for (;;) {
			prepare_to_wait_event(__var_waitqueue(&pd->congested),
					      &wqe.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			if (pd->bio_queue_size <= pd->write_congestion_off)
				break;
			pd->congested = true;
			spin_unlock(&pd->lock);
			schedule();
			spin_lock(&pd->lock);
		}
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	if (was_empty) {
		atomic_set(&pd->scan_queue, 1);
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
}
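/*
 * Top-level ->submit_bio entry point: split the bio to the queue limits,
 * send READs straight to the packet I/O scheduler, and validate WRITEs
 * (device writable, size a multiple of CD_FRAMESIZE) before splitting them
 * at zone boundaries and queuing them as unaligned writes.
 */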
static void pkt_submit_bio(struct bio *bio)
{
	struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
	struct device *ddev = disk_to_dev(pd->disk);
	struct bio *split;

	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	dev_dbg(ddev, "start = %6llx stop = %6llx\n",
		bio->bi_iter.bi_sector, bio_end_sector(bio));

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		pkt_make_request_read(pd, bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		dev_err(ddev, "wrong bio size\n");
		goto end_io;
	}

	do {
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, &pkt_bio_set);
			bio_chain(split, bio);
			submit_bio_noacct(bio);
		} else {
			split = bio;
		}

		pkt_make_request_write(split);
	} while (split != bio);

	return;
end_io:
	bio_io_error(bio);
}
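/*
 * Bind this pktcdvd instance to the underlying CD/DVD block device: reject
 * recursive or chained setups, open the device by dev_t, check that it is
 * driven by a SCSI/ATAPI CD-ROM queue, then start the kcdrwd worker thread
 * and create the per-device proc entry.
 */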
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int i;
	struct file *bdev_file;
	struct scsi_device *sdev;

	if (pd->pkt_dev == dev) {
		dev_err(ddev, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];

		if (!pd2)
			continue;
		if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
			dev_err(ddev, "%pg already setup\n",
				file_bdev(pd2->bdev_file));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			dev_err(ddev, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
					  NULL, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);
	sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
	if (!sdev) {
		fput(bdev_file);
		return -EINVAL;
	}
	put_device(&sdev->sdev_gendev);

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev_file = bdev_file;

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
	if (IS_ERR(pd->cdrw.thread)) {
		dev_err(ddev, "can't start kernel thread\n");
		goto out_mem;
	}

	proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
	dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
	return 0;

out_mem:
	fput(bdev_file);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return -ENOMEM;
}
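/*
 * Block device ioctl handler. Selected CD-ROM ioctls are forwarded to the
 * attached drive so userspace tools keep working when they talk to the
 * pktcdvd node instead of the real device.
 */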
static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	struct device *ddev = disk_to_dev(pd->disk);
	int ret;

	dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		fallthrough;
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		if (!bdev->bd_disk->fops->ioctl)
			ret = -ENOTTY;
		else
			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
		break;
	default:
		dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
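/*
 * Media change polling is delegated to the attached drive, so the pktcdvd
 * node reports the same disc events as the underlying device.
 */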
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	attached_disk = file_bdev(pd->bdev_file)->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
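/*
 * Name the device node below a pktcdvd/ directory, so a writer shows up as
 * e.g. /dev/pktcdvd/pktcdvd0 next to the pktcdvd/control node.
 */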
static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner			= THIS_MODULE,
	.submit_bio		= pkt_submit_bio,
	.open			= pkt_open,
	.release		= pkt_release,
	.ioctl			= pkt_ioctl,
	.compat_ioctl		= blkdev_compat_ptr_ioctl,
	.check_events		= pkt_check_events,
	.devnode		= pkt_devnode,
};
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
{
	struct queue_limits lim = {
		.max_hw_sectors		= PACKET_MAX_SECTORS,
		.logical_block_size	= CD_FRAMESIZE,
		.features		= BLK_FEAT_ROTATIONAL,
	};
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
					sizeof(struct pkt_rb_node));
	if (ret)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_mem;
	}
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME "%d", idx);
	disk->private_data = pd;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_mem2;

	/* inherit events of the host device */
	disk->events = file_bdev(pd->bdev_file)->bd_disk->events;

	ret = add_disk(disk);
	if (ret)
		goto out_mem2;

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_mem2:
	put_disk(disk);
out_mem:
	mempool_exit(&pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	struct device *ddev;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}

	ddev = disk_to_dev(pd->disk);

	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	fput(pd->bdev_file);

	remove_proc_entry(pd->disk->disk_name, pkt_proc);
	dev_notice(ddev, "writer unmapped\n");

	del_gendisk(pd->disk);
	put_disk(pd->disk);

	mempool_exit(&pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
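/*
 * Report the current mapping of one writer slot back to userspace: the
 * CD/DVD device it is bound to, its own pkt_dev number, and the total
 * number of writer slots.
 */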
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
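/*
 * ioctl handler for the control node: PACKET_CTRL_CMD carries a
 * pkt_ctrl_command that either sets up a new writer, tears one down, or
 * queries an existing mapping. Setup and teardown require CAP_SYS_ADMIN.
 */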
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}
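/*
 * Userspace reaches the handler above through the misc control node,
 * /dev/pktcdvd/control. A setup request is roughly the following sketch
 * (the exact field layout is defined by struct pkt_ctrl_command in the
 * pktcdvd uapi header):
 *
 *	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
 *
 *	c.dev = <encoded dev_t of the CD/DVD device>;
 *	fd = open("/dev/pktcdvd/control", O_RDWR);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *	// on success, c.pkt_dev identifies the new pktcdvd device
 *
 * Tools such as pktsetup(8) issue these calls on the user's behalf.
 */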
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
};
static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
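/*
 * Module init: set up the shared mempool and bio set, register the block
 * major, the sysfs class, the misc control device and debugfs, and create
 * the /proc/driver/pktcdvd directory. pkt_exit() below undoes this in
 * reverse order.
 */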
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (ret)
		return ret;
	ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret) {
		mempool_exit(&psd_pool);
		return ret;
	}

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);

	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);