/*
 * drivers/s390/char/tape_block.c
 *   block device frontend for tape device driver
 *
 * S390 and zSeries version
 *   Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Carsten Otte <cotte@de.ibm.com>
 *              Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 *              Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#include <asm/debug.h>

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"

#define TAPEBLOCK_MAX_SEC	100
#define TAPEBLOCK_MIN_REQUEUE	3
/*
 * 2003/11/25  Stefan Bader <shbader@de.ibm.com>
 *
 * In 2.5/2.6 the block device request function is very likely to be called
 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
 * just call any function that tries to allocate CCW requests from that con-
 * text since it might sleep. There are two choices to work around this:
 *	a) do not allocate with kmalloc but use its own memory pool
 *	b) take requests from the queue outside that context, knowing that
 *	   allocation might sleep
 */

/*
 * file operation structure for tape block frontend
 */
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
static const struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.media_changed	 = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};

static int tapeblock_major = 0;
static void
tapeblock_trigger_requeue(struct tape_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
		return;
	schedule_work(&device->blk_data.requeue_task);
}
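/*
 * Note: atomic_cmpxchg() returns the old value, so the test above lets
 * exactly one caller flip requeue_scheduled from 0 to 1; every other caller
 * sees a non-zero old value and returns without scheduling a second work
 * item. tapeblock_requeue() resets the flag when it is done.
 */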
/*
 * Post finished request.
 */
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	req = (struct request *) data;
	blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);

	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
			(blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
	device->discipline->free_bread(ccw_req);
	if (!list_empty(&device->req_queue) ||
	    blk_peek_request(device->blk_data.request_queue))
		tapeblock_trigger_requeue(device);
}
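/*
 * blk_rq_pos() and blk_rq_sectors() count in 512-byte sectors; shifting by
 * TAPEBLOCK_HSEC_S2B converts that into tape blocks of TAPEBLOCK_HSEC_SIZE
 * bytes, which is the unit block_position is kept in.
 */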
/*
 * Feed the tape device CCW queue with requests supplied in a list.
 */
static int
tapeblock_start_request(struct tape_device *device, struct request *req)
{
	struct tape_request *ccw_req;
	int rc;

	DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);

	ccw_req = device->discipline->bread(device, req);
	if (IS_ERR(ccw_req)) {
		DBF_EVENT(1, "TBLOCK: bread failed\n");
		blk_end_request_all(req, -EIO);
		return PTR_ERR(ccw_req);
	}
	ccw_req->callback = __tapeblock_end_request;
	ccw_req->callback_data = (void *) req;
	ccw_req->retries = TAPEBLOCK_RETRIES;

	rc = tape_do_io_async(device, ccw_req);
	if (rc) {
		/*
		 * Start/enqueueing failed. No retries in
		 * this case.
		 */
		blk_end_request_all(req, -EIO);
		device->discipline->free_bread(ccw_req);
	}
	return rc;
}
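/*
 * tape_do_io_async() submits the CCW request and returns without waiting
 * for completion; the result arrives later through the ->callback set
 * above. After a successful submit the block request therefore belongs to
 * the completion path and must not be touched here anymore.
 */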
/*
 * Move requests from the block device request queue to the tape device ccw
 * queue.
 */
static void
tapeblock_requeue(struct work_struct *work)
{
	struct tape_blk_data *blkdat;
	struct tape_device *device;
	struct request_queue *queue;
	int nr_queued;
	struct request *req;
	struct list_head *l;
	int rc;

	blkdat = container_of(work, struct tape_blk_data, requeue_task);
	device = blkdat->device;
	if (!device)
		return;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	queue = device->blk_data.request_queue;

	/* Count number of requests on ccw queue. */
	nr_queued = 0;
	list_for_each(l, &device->req_queue)
		nr_queued++;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock_irq(&device->blk_data.request_queue_lock);
	while (
		!blk_queue_plugged(queue) &&
		blk_peek_request(queue) &&
		nr_queued < TAPEBLOCK_MIN_REQUEUE
	) {
		req = blk_fetch_request(queue);
		if (rq_data_dir(req) == WRITE) {
			DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
			spin_unlock_irq(&device->blk_data.request_queue_lock);
			blk_end_request_all(req, -EIO);
			spin_lock_irq(&device->blk_data.request_queue_lock);
			continue;
		}
		nr_queued++;
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		rc = tapeblock_start_request(device, req);
		spin_lock_irq(&device->blk_data.request_queue_lock);
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
	atomic_set(&device->blk_data.requeue_scheduled, 0);
}
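/*
 * Two details worth noting: the loop feeds the tape device only while fewer
 * than TAPEBLOCK_MIN_REQUEUE requests sit on its CCW queue, and clearing
 * requeue_scheduled at the end re-arms tapeblock_trigger_requeue(). The ccw
 * device lock is dropped with spin_unlock() rather than spin_unlock_irq(),
 * so interrupts deliberately stay disabled across the switch to the request
 * queue lock; the final spin_unlock_irq() re-enables them.
 */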
/*
 * Tape request queue function. Called from ll_rw_blk.c
 */
static void
tapeblock_request_fn(struct request_queue *queue)
{
	struct tape_device *device;

	device = (struct tape_device *) queue->queuedata;
	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
	BUG_ON(device == NULL);
	tapeblock_trigger_requeue(device);
}
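/*
 * The block layer calls the request function with the queue lock held and,
 * as the 2003/11/25 note at the top of this file explains, possibly with
 * interrupts disabled. All it may safely do here is kick the worker, which
 * then allocates CCW requests from a context that is allowed to sleep.
 */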
/*
 * This function is called for every new tapedevice
 */
int
tapeblock_setup_device(struct tape_device *device)
{
	struct tape_blk_data *blkdat;
	struct gendisk *disk;
	int rc;

	blkdat = &device->blk_data;
	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

	blkdat->request_queue = blk_init_queue(
		tapeblock_request_fn,
		&blkdat->request_queue_lock
	);
	if (!blkdat->request_queue)
		return -ENOMEM;

	elevator_exit(blkdat->request_queue->elevator);
	rc = elevator_init(blkdat->request_queue, "noop");
	if (rc)
		goto cleanup_queue;

	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
	blk_queue_max_segments(blkdat->request_queue, -1L);
	blk_queue_max_segment_size(blkdat->request_queue, -1L);
	blk_queue_segment_boundary(blkdat->request_queue, -1L);

	disk = alloc_disk(1);
	if (!disk) {
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	disk->major = tapeblock_major;
	disk->first_minor = device->first_minor;
	disk->fops = &tapeblock_fops;
	disk->private_data = tape_get_device(device);
	disk->queue = blkdat->request_queue;
	set_capacity(disk, 0);
	sprintf(disk->disk_name, "btibm%d",
		device->first_minor / TAPE_MINORS_PER_DEV);

	blkdat->disk = disk;
	blkdat->medium_changed = 1;
	blkdat->request_queue->queuedata = tape_get_device(device);

	add_disk(disk);

	tape_get_device(device);
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;

cleanup_queue:
	blk_cleanup_queue(blkdat->request_queue);
	blkdat->request_queue = NULL;

	return rc;
}
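/*
 * Reference counting note: setup takes three references on the tape device,
 * one stored in disk->private_data, one in request_queue->queuedata and one
 * for the requeue worker. tapeblock_cleanup_device() below drops all three.
 */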
void
tapeblock_cleanup_device(struct tape_device *device)
{
	flush_scheduled_work();
	tape_put_device(device);

	if (!device->blk_data.disk) {
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	device->blk_data.disk->private_data = NULL;
	tape_put_device(device);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	device->blk_data.request_queue->queuedata = NULL;
	tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
/*
 * Detect number of blocks of the tape.
 * FIXME: can we extend this to detect the block size as well?
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *device;
	unsigned int nr_of_blks;
	int rc;

	device = (struct tape_device *) disk->private_data;
	BUG_ON(!device);

	if (!device->blk_data.medium_changed)
		return 0;

	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	pr_info("%s: Determining the size of the recorded area...\n",
		dev_name(&device->cdev->dev));
	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	if (rc > nr_of_blks)
		return -EINVAL;

	DBF_LH(3, "Image file starts at %d\n", rc);
	device->bof = rc;
	nr_of_blks -= rc;

	pr_info("%s: The size of the recorded area is %i blocks\n",
		dev_name(&device->cdev->dev), nr_of_blks);
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
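/*
 * How the size probe works: seek to the filemark ending the recorded area
 * (MTFSFM) and read the block position (MTTELL), then back up one file
 * (MTBSF) and read the position again; the difference is the number of
 * recorded blocks. set_capacity() counts in 512-byte sectors, hence the
 * scaling by TAPEBLOCK_HSEC_SIZE/512 sectors per tape block.
 */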
static int
tapeblock_medium_changed(struct gendisk *disk)
{
	struct tape_device *device;

	device = (struct tape_device *) disk->private_data;
	DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
		device, device->blk_data.medium_changed);

	return device->blk_data.medium_changed;
}
/*
 * Block frontend tape device open function.
 */
static int
tapeblock_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct tape_device *device;
	int rc;

	device = tape_get_device(disk->private_data);

	if (device->required_tapemarks) {
		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
		pr_warning("%s: Opening the tape failed because of missing "
			   "end-of-file marks\n", dev_name(&device->cdev->dev));
		rc = -EPERM;
		goto put_device;
	}

	rc = tape_open(device);
	if (rc)
		goto put_device;

	rc = tapeblock_revalidate_disk(disk);
	if (rc)
		goto release;

	/*
	 * Note: The reference to <device> is held until the release function
	 *       is called.
	 */
	tape_state_set(device, TS_BLKUSE);
	return 0;

release:
	tape_release(device);
put_device:
	tape_put_device(device);
	return rc;
}
/*
 * Block frontend tape device release function.
 *
 * Note: One reference to the tape device was made by the open function. So
 *       we just get the pointer here and release the reference.
 */
static int
tapeblock_release(struct gendisk *disk, fmode_t mode)
{
	struct tape_device *device = disk->private_data;

	tape_state_set(device, TS_IN_USE);
	tape_release(device);
	tape_put_device(device);

	return 0;
}
/*
 * Initialize block device frontend.
 */
int
tapeblock_init(void)
{
	int rc;

	/* Register the tape major number to the kernel */
	rc = register_blkdev(tapeblock_major, "tBLK");
	if (rc < 0)
		return rc;

	if (tapeblock_major == 0)
		tapeblock_major = rc;
	return 0;
}
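/*
 * Passing a major of 0 to register_blkdev() asks the kernel for a
 * dynamically allocated major number; the returned value is kept in
 * tapeblock_major so the matching unregister_blkdev() call in
 * tapeblock_exit() releases the same major.
 */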
/*
 * Deregister major for block device frontend
 */
void
tapeblock_exit(void)
{
	unregister_blkdev(tapeblock_major, "tBLK");
}