/*
 *  drivers/s390/char/tape_block.c
 *    block device frontend for tape device driver
 *
 *  S390 and zSeries version
 *    Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#include <asm/debug.h>

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"

#define TAPEBLOCK_MAX_SEC	100
#define TAPEBLOCK_MIN_REQUEUE	3

/*
 * 2003/11/25	Stefan Bader <shbader@de.ibm.com>
 *
 * In 2.5/2.6 the block device request function is very likely to be called
 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
 * just call any function that tries to allocate CCW requests from that con-
 * text since it might sleep. There are two choices to work around this:
 *	a) do not allocate with kmalloc but use its own memory pool
 *	b) take requests from the queue outside that context, knowing that
 *	   allocation might sleep
 */
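
/*
 * The code below takes approach (b): the block layer's request function only
 * triggers the requeue_task work item, and tapeblock_requeue() then moves
 * requests to the CCW queue from process context, where the discipline's
 * bread() allocation may sleep.
 */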

/*
 * file operation structure for tape block frontend
 */
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
			   unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);

static const struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.locked_ioctl	 = tapeblock_ioctl,
	.media_changed	 = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};

static int tapeblock_major = 0;
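
/*
 * A major number of 0 lets register_blkdev() assign a dynamic major;
 * tapeblock_init() below stores the value that was handed out.
 */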

static void
tapeblock_trigger_requeue(struct tape_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
		return;
	schedule_work(&device->blk_data.requeue_task);
}
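
/*
 * requeue_scheduled stays set until tapeblock_requeue() clears it again at
 * the end of its run, so at most one requeue work item is pending at any
 * time.
 */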

/*
 * Post finished request.
 */
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	req = (struct request *) data;
	blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);

	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
			(blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
	device->discipline->free_bread(ccw_req);
	if (!list_empty(&device->req_queue) ||
	    blk_peek_request(device->blk_data.request_queue))
		tapeblock_trigger_requeue(device);
}
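
/*
 * On success the position behind the completed request is remembered in
 * blk_data.block_position; on error it is set to -1 because the real tape
 * position is no longer known. If either queue still holds requests, the
 * requeue work is triggered again.
 */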

/*
 * Feed the tape device CCW queue with requests supplied in a list.
 */
static int
tapeblock_start_request(struct tape_device *device, struct request *req)
{
	struct tape_request *ccw_req;
	int rc;

	DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);

	ccw_req = device->discipline->bread(device, req);
	if (IS_ERR(ccw_req)) {
		DBF_EVENT(1, "TBLOCK: bread failed\n");
		blk_end_request_all(req, -EIO);
		return PTR_ERR(ccw_req);
	}
	ccw_req->callback = __tapeblock_end_request;
	ccw_req->callback_data = (void *) req;
	ccw_req->retries = TAPEBLOCK_RETRIES;

	rc = tape_do_io_async(device, ccw_req);
	if (rc) {
		/*
		 * Start/enqueueing failed. No retries in
		 * this case.
		 */
		blk_end_request_all(req, -EIO);
		device->discipline->free_bread(ccw_req);
	}

	return rc;
}
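
/*
 * On success the request completes asynchronously: __tapeblock_end_request()
 * was registered as callback above and runs once the CCW request started by
 * tape_do_io_async() has finished.
 */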

/*
 * Move requests from the block device request queue to the tape device ccw
 * queue.
 */
static void
tapeblock_requeue(struct work_struct *work) {
	struct tape_blk_data *blkdat;
	struct tape_device *device;
	struct request_queue *queue;
	int nr_queued;
	struct request *req;
	struct list_head *l;
	int rc;

	blkdat = container_of(work, struct tape_blk_data, requeue_task);
	device = blkdat->device;
	if (!device)
		return;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	queue  = device->blk_data.request_queue;

	/* Count number of requests on ccw queue. */
	nr_queued = 0;
	list_for_each(l, &device->req_queue)
		nr_queued++;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock_irq(&device->blk_data.request_queue_lock);
	while (
		!blk_queue_plugged(queue) &&
		blk_peek_request(queue) &&
		nr_queued < TAPEBLOCK_MIN_REQUEUE
	) {
		req = blk_fetch_request(queue);
		if (rq_data_dir(req) == WRITE) {
			DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
			spin_unlock_irq(&device->blk_data.request_queue_lock);
			blk_end_request_all(req, -EIO);
			spin_lock_irq(&device->blk_data.request_queue_lock);
			continue;
		}
		nr_queued++;
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		rc = tapeblock_start_request(device, req);
		spin_lock_irq(&device->blk_data.request_queue_lock);
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
	atomic_set(&device->blk_data.requeue_scheduled, 0);
}
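
/*
 * Requests are only moved while fewer than TAPEBLOCK_MIN_REQUEUE CCW requests
 * are queued on the device, and write requests are rejected with -EIO since
 * this frontend is read-only. Clearing requeue_scheduled at the end re-arms
 * tapeblock_trigger_requeue().
 */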

/*
 * Tape request queue function. Called from ll_rw_blk.c
 */
static void
tapeblock_request_fn(struct request_queue *queue)
{
	struct tape_device *device;

	device = (struct tape_device *) queue->queuedata;
	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
	BUG_ON(device == NULL);
	tapeblock_trigger_requeue(device);
}
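
/*
 * The request function may be called with interrupts disabled (see the note
 * at the top of this file), so it must not sleep and only schedules the
 * requeue work item.
 */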

/*
 * This function is called for every new tapedevice
 */
int
tapeblock_setup_device(struct tape_device *device)
{
	struct tape_blk_data *blkdat;
	struct gendisk *disk;
	int rc;

	blkdat = &device->blk_data;
	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

	blkdat->request_queue = blk_init_queue(
		tapeblock_request_fn,
		&blkdat->request_queue_lock
	);
	if (!blkdat->request_queue)
		return -ENOMEM;

	elevator_exit(blkdat->request_queue->elevator);
	rc = elevator_init(blkdat->request_queue, "noop");
	if (rc)
		goto cleanup_queue;

	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
	blk_queue_max_segment_size(blkdat->request_queue, -1L);
	blk_queue_segment_boundary(blkdat->request_queue, -1L);
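
	/*
	 * The default elevator is replaced by the noop scheduler, since
	 * request reordering gains nothing on a sequential tape device, and
	 * the segment limits above are set to -1L, i.e. effectively
	 * unlimited.
	 */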

	disk = alloc_disk(1);
	if (!disk) {
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	disk->major = tapeblock_major;
	disk->first_minor = device->first_minor;
	disk->fops = &tapeblock_fops;
	disk->private_data = tape_get_device_reference(device);
	disk->queue = blkdat->request_queue;
	set_capacity(disk, 0);
	sprintf(disk->disk_name, "btibm%d",
		device->first_minor / TAPE_MINORS_PER_DEV);
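
	/*
	 * Disk nodes are named btibm0, btibm1, ..., one per physical tape
	 * device; each device owns TAPE_MINORS_PER_DEV minor numbers.
	 */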

	blkdat->disk = disk;
	blkdat->medium_changed = 1;
	blkdat->request_queue->queuedata = tape_get_device_reference(device);

	add_disk(disk);

	tape_get_device_reference(device);
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;

cleanup_queue:
	blk_cleanup_queue(blkdat->request_queue);
	blkdat->request_queue = NULL;

	return rc;
}

void
tapeblock_cleanup_device(struct tape_device *device)
{
	flush_scheduled_work();
	tape_put_device(device);

	if (!device->blk_data.disk) {
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
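
/*
 * Cleanup drops the references taken during setup: the extra reference taken
 * before INIT_WORK(), the disk's private_data reference and the request
 * queue's queuedata reference are all released via tape_put_device() before
 * the queue is torn down.
 */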

/*
 * Detect number of blocks of the tape.
 * FIXME: can we extend this to detect the block size as well?
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *device;
	unsigned int nr_of_blks;
	int rc;

	device = (struct tape_device *) disk->private_data;
	BUG_ON(!device);

	if (!device->blk_data.medium_changed)
		return 0;

	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	pr_info("%s: Determining the size of the recorded area...\n",
		dev_name(&device->cdev->dev));
	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	DBF_LH(3, "Image file starts at %d\n", rc);
	nr_of_blks -= rc;

	pr_info("%s: The size of the recorded area is %i blocks\n",
		dev_name(&device->cdev->dev), nr_of_blks);
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
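
/*
 * The sizing above relies on tape positions reported by MTTELL: after
 * spacing forward to the next file mark (MTFSFM) the position marks the end
 * of the recorded area, and after spacing back one file mark (MTBSF) it
 * marks the start; the difference is the capacity in TAPEBLOCK_HSEC_SIZE
 * byte blocks.
 */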

static int
tapeblock_medium_changed(struct gendisk *disk)
{
	struct tape_device *device;

	device = (struct tape_device *) disk->private_data;
	DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
		device, device->blk_data.medium_changed);

	return device->blk_data.medium_changed;
}

/*
 * Block frontend tape device open function.
 */
static int
tapeblock_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct tape_device *device;
	int rc;

	device = tape_get_device_reference(disk->private_data);

	if (device->required_tapemarks) {
		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
		pr_warning("%s: Opening the tape failed because of missing "
			   "end-of-file marks\n", dev_name(&device->cdev->dev));
		rc = -EPERM;
		goto put_device;
	}

	rc = tape_open(device);
	if (rc)
		goto put_device;

	rc = tapeblock_revalidate_disk(disk);
	if (rc)
		goto release;

	/*
	 * Note: The reference to <device> is held until the release function
	 *       is called.
	 */
	tape_state_set(device, TS_BLKUSE);
	return 0;

release:
	tape_release(device);
put_device:
	tape_put_device(device);
	return rc;
}

/*
 * Block frontend tape device release function.
 *
 * Note: One reference to the tape device was made by the open function. So
 *       we just get the pointer here and release the reference.
 */
static int
tapeblock_release(struct gendisk *disk, fmode_t mode)
{
	struct tape_device *device = disk->private_data;

	tape_state_set(device, TS_IN_USE);
	tape_release(device);
	tape_put_device(device);

	return 0;
}

/*
 * Support of some generic block device IOCTLs.
 */
static int
tapeblock_ioctl(
	struct block_device *bdev,
	fmode_t mode,
	unsigned int command,
	unsigned long arg
) {
	int rc;
	int minor;
	struct gendisk *disk = bdev->bd_disk;
	struct tape_device *device;

	device = disk->private_data;
	minor  = MINOR(bdev->bd_dev);

	DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
	DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);

	switch (command) {
		/* Refuse some IOCTL calls without complaining (mount). */
		case 0x5310:		/* CDROMMULTISESSION */
			rc = -EINVAL;
			break;
		default:
			rc = -EINVAL;
	}

	return rc;
}

/*
 * Initialize block device frontend.
 */
int
tapeblock_init(void)
{
	int rc;

	/* Register the tape major number to the kernel */
	rc = register_blkdev(tapeblock_major, "tBLK");
	if (rc < 0)
		return rc;

	if (tapeblock_major == 0)
		tapeblock_major = rc;

	return 0;
}

/*
 * Deregister major for block device frontend
 */
void
tapeblock_exit(void)
{
	unregister_blkdev(tapeblock_major, "tBLK");
}