/*
 *  drivers/s390/char/tape_block.c
 *    block device frontend for tape device driver
 *
 *  S390 and zSeries version
 *    Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#include <asm/debug.h>

#define TAPE_DBF_AREA	tape_core_dbf
#include "tape.h"

#define PRINTK_HEADER "TAPE_BLOCK: "
#define TAPEBLOCK_MAX_SEC	100
#define TAPEBLOCK_MIN_REQUEUE	3
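
/*
 * TAPEBLOCK_MAX_SEC caps the number of sectors accepted per block request
 * (see blk_queue_max_sectors() in tapeblock_setup_device() below), while
 * TAPEBLOCK_MIN_REQUEUE bounds how many requests tapeblock_requeue() keeps
 * on the tape device's CCW queue at a time.
 */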

/*
 * 2003/11/25	Stefan Bader <shbader@de.ibm.com>
 *
 * In 2.5/2.6 the block device request function is very likely to be called
 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
 * just call any function that tries to allocate CCW requests from that
 * context since it might sleep. There are two choices to work around this:
 *	a) do not allocate with kmalloc but use its own memory pool
 *	b) take requests from the queue outside that context, knowing that
 *	   allocation might sleep
 */
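
/*
 * As implemented below, the driver follows variant b): tapeblock_request_fn()
 * only schedules the requeue_task work item, and tapeblock_requeue() later
 * moves requests from the block queue to the CCW queue from process context.
 */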

/*
 * file operation structure for tape block frontend
 */
static int tapeblock_open(struct inode *, struct file *);
static int tapeblock_release(struct inode *, struct file *);
static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
			   unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);

static struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.ioctl		 = tapeblock_ioctl,
	.media_changed	 = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};

static int tapeblock_major = 0;
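
/*
 * Schedule the requeue work item, unless a run is already pending.
 */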
static void
tapeblock_trigger_requeue(struct tape_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
		return;
	schedule_work(&device->blk_data.requeue_task);
}

/*
 * Post finished request.
 */
static void
tapeblock_end_request(struct request *req, int error)
{
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}
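
/*
 * Completion callback for a tape CCW request: finish the corresponding
 * block layer request and update the cached block position.
 */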
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	req = (struct request *) data;
	tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
			(req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
	device->discipline->free_bread(ccw_req);
	if (!list_empty(&device->req_queue) ||
	    elv_next_request(device->blk_data.request_queue))
		tapeblock_trigger_requeue(device);
}

/*
 * Feed the tape device CCW queue with requests supplied in a list.
 */
static int
tapeblock_start_request(struct tape_device *device, struct request *req)
{
	struct tape_request *	ccw_req;
	int			rc;

	DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);

	ccw_req = device->discipline->bread(device, req);
	if (IS_ERR(ccw_req)) {
		DBF_EVENT(1, "TBLOCK: bread failed\n");
		tapeblock_end_request(req, -EIO);
		return PTR_ERR(ccw_req);
	}
	ccw_req->callback = __tapeblock_end_request;
	ccw_req->callback_data = (void *) req;
	ccw_req->retries = TAPEBLOCK_RETRIES;

	rc = tape_do_io_async(device, ccw_req);
	if (rc) {
		/*
		 * Start/enqueueing failed. No retries in
		 * this case.
		 */
		tapeblock_end_request(req, -EIO);
		device->discipline->free_bread(ccw_req);
	}
	return rc;
}

/*
 * Move requests from the block device request queue to the tape device ccw
 * queue.
 */
static void
tapeblock_requeue(struct work_struct *work) {
	struct tape_blk_data *	blkdat;
	struct tape_device *	device;
	struct request_queue *	queue;
	int			nr_queued;
	struct request *	req;
	struct list_head *	l;
	int			rc;

	blkdat = container_of(work, struct tape_blk_data, requeue_task);
	device = blkdat->device;
	if (!device)
		return;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	queue = device->blk_data.request_queue;

	/* Count number of requests on ccw queue. */
	nr_queued = 0;
	list_for_each(l, &device->req_queue)
		nr_queued++;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock(&device->blk_data.request_queue_lock);
	while (
		!blk_queue_plugged(queue) &&
		elv_next_request(queue)   &&
		nr_queued < TAPEBLOCK_MIN_REQUEUE
	) {
		req = elv_next_request(queue);
		if (rq_data_dir(req) == WRITE) {
			DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
			blkdev_dequeue_request(req);
			tapeblock_end_request(req, -EIO);
			continue;
		}
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		rc = tapeblock_start_request(device, req);
		spin_lock_irq(&device->blk_data.request_queue_lock);
		blkdev_dequeue_request(req);
		nr_queued++;
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
	atomic_set(&device->blk_data.requeue_scheduled, 0);
}

/*
 * Tape request queue function. Called from ll_rw_blk.c
 */
static void
tapeblock_request_fn(struct request_queue *queue)
{
	struct tape_device *device;

	device = (struct tape_device *) queue->queuedata;
	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
	BUG_ON(device == NULL);
	tapeblock_trigger_requeue(device);
}

/*
 * This function is called for every new tapedevice
 */
int
tapeblock_setup_device(struct tape_device * device)
{
	struct tape_blk_data *	blkdat;
	struct gendisk *	disk;
	int			rc;

	blkdat = &device->blk_data;
	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

	blkdat->request_queue = blk_init_queue(
		tapeblock_request_fn,
		&blkdat->request_queue_lock
	);
	if (!blkdat->request_queue)
		return -ENOMEM;

	elevator_exit(blkdat->request_queue->elevator);
	rc = elevator_init(blkdat->request_queue, "noop");
	if (rc)
		goto cleanup_queue;

	blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
	blk_queue_max_segment_size(blkdat->request_queue, -1L);
	blk_queue_segment_boundary(blkdat->request_queue, -1L);

	disk = alloc_disk(1);
	if (!disk) {
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	disk->major = tapeblock_major;
	disk->first_minor = device->first_minor;
	disk->fops = &tapeblock_fops;
	disk->private_data = tape_get_device_reference(device);
	disk->queue = blkdat->request_queue;
	set_capacity(disk, 0);
	sprintf(disk->disk_name, "btibm%d",
		device->first_minor / TAPE_MINORS_PER_DEV);

	blkdat->disk = disk;
	blkdat->medium_changed = 1;
	blkdat->request_queue->queuedata = tape_get_device_reference(device);

	add_disk(disk);

	tape_get_device_reference(device);
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;

cleanup_queue:
	blk_cleanup_queue(blkdat->request_queue);
	blkdat->request_queue = NULL;

	return rc;
}
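
/*
 * Counterpart to tapeblock_setup_device(): release the gendisk, the request
 * queue and the device references taken during setup.
 */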
void
tapeblock_cleanup_device(struct tape_device *device)
{
	flush_scheduled_work();
	tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",
			device->cdev->dev.bus_id);
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}

/*
 * Detect number of blocks of the tape.
 * FIXME: can we extend this to detect the block size as well?
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *	device;
	unsigned int		nr_of_blks;
	int			rc;

	device = (struct tape_device *) disk->private_data;
	BUG_ON(!device);

	if (!device->blk_data.medium_changed)
		return 0;

	PRINT_INFO("Detecting media size...\n");
	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	if (rc > nr_of_blks)
		return -EINVAL;

	DBF_LH(3, "Image file starts at %d\n", rc);
	device->bof = rc;
	nr_of_blks -= rc;

	PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
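
/*
 * Report the cached medium-changed state to the block layer
 * (block_device_operations.media_changed).
 */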
static int
tapeblock_medium_changed(struct gendisk *disk)
{
	struct tape_device *device;

	device = (struct tape_device *) disk->private_data;
	DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
		device, device->blk_data.medium_changed);

	return device->blk_data.medium_changed;
}

/*
 * Block frontend tape device open function.
 */
static int
tapeblock_open(struct inode *inode, struct file *filp)
{
	struct gendisk *	disk;
	struct tape_device *	device;
	int			rc;

	disk   = inode->i_bdev->bd_disk;
	device = tape_get_device_reference(disk->private_data);

	if (device->required_tapemarks) {
		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
		PRINT_ERR("TBLOCK: Refusing to open tape with missing"
			" end of file marks.\n");
		rc = -EPERM;
		goto put_device;
	}

	rc = tape_open(device);
	if (rc)
		goto put_device;

	rc = tapeblock_revalidate_disk(disk);
	if (rc)
		goto release;

	/*
	 * Note: The reference to <device> is held until the release function
	 *       is called.
	 */
	tape_state_set(device, TS_BLKUSE);
	return 0;

release:
	tape_release(device);
put_device:
	tape_put_device(device);
	return rc;
}

/*
 * Block frontend tape device release function.
 *
 * Note: One reference to the tape device was made by the open function. So
 *       we just get the pointer here and release the reference.
 */
static int
tapeblock_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct tape_device *device = disk->private_data;

	tape_state_set(device, TS_IN_USE);
	tape_release(device);
	tape_put_device(device);

	return 0;
}

/*
 * Support of some generic block device IOCTLs.
 */
static int
tapeblock_ioctl(
	struct inode *		inode,
	struct file *		file,
	unsigned int		command,
	unsigned long		arg
) {
	int rc;
	int minor;
	struct gendisk *disk;
	struct tape_device *device;

	rc     = 0;
	disk   = inode->i_bdev->bd_disk;
	BUG_ON(!disk);
	device = disk->private_data;
	BUG_ON(!device);
	minor  = iminor(inode);

	DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
	DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);

	switch (command) {
		/* Refuse some IOCTL calls without complaining (mount). */
		case 0x5310:		/* CDROMMULTISESSION */
			rc = -EINVAL;
			break;
		default:
			PRINT_WARN("invalid ioctl 0x%x\n", command);
			rc = -EINVAL;
	}

	return rc;
}

/*
 * Initialize block device frontend.
 */
int
tapeblock_init(void)
{
	int rc;

	/* Register the tape major number to the kernel */
	rc = register_blkdev(tapeblock_major, "tBLK");
	if (rc < 0)
		return rc;

	if (tapeblock_major == 0)
		tapeblock_major = rc;
	PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
	return 0;
}

/*
 * Deregister major for block device frontend
 */
void
tapeblock_exit(void)
{
	unregister_blkdev(tapeblock_major, "tBLK");
}