/*
 *  linux/kernel/blk_dev/ll_rw.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"
extern long rd_init(long mem_start, int length);
extern u_long sbpcd_init(u_long, u_long);
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];
/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;
/* This specifies how many sectors to read ahead on the disk. */

int read_ahead[MAX_BLKDEV] = {0, };
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* no_dev */
	{ NULL, NULL },		/* dev mem */
	{ NULL, NULL },		/* dev fd */
	{ NULL, NULL },		/* dev hd */
	{ NULL, NULL },		/* dev ttyx */
	{ NULL, NULL },		/* dev tty */
	{ NULL, NULL },		/* dev lp */
	{ NULL, NULL },		/* dev pipes */
	{ NULL, NULL },		/* dev sd */
	{ NULL, NULL }		/* dev st */
};
/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
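/*
 * A minimal sketch (added, not in the original source) of how a block
 * driver fills in these tables at init time.  MY_MAJOR, my_sizes and
 * my_blksizes are hypothetical names used only to illustrate the
 * [MAJOR][MINOR] indexing convention described above:
 *
 *	static int my_sizes[1] = { 20480 };	(minor 0 holds 20480 kB)
 *	static int my_blksizes[1] = { 1024 };	(1024-byte blocks)
 *
 *	blk_size[MY_MAJOR] = my_sizes;
 *	blksize_size[MY_MAJOR] = my_blksizes;
 */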
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->dev = dev;
	return req;
}
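/*
 * Added commentary: the scan above starts at prev_found, the slot
 * handed out last time, and walks backwards, wrapping from
 * all_requests to limit - 1.  Caching the starting point spreads
 * allocations across the table instead of rescanning the same busy
 * entries from slot 0 on every call; arriving back at prev_found
 * means all n slots are in use and NULL is returned.
 */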
/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */
static inline struct request * get_request_wait(int n, int dev)
{
	register struct request *req;

	while ((req = get_request(n, dev)) == NULL)
		sleep_on(&wait_for_request);
	return req;
}
/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];
int is_read_only(int dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}
void set_device_ro(int dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
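/*
 * Worked example (added commentary): for minor 37, minor >> 5 selects
 * word 1 of the 8-word bitmap and 1 << (37 & 31) selects bit 5 within
 * it, so each major can track 8 * 32 = 256 read-only minors:
 *
 *	set_device_ro(0x0325, 1);	(major 3, minor 0x25 = 37)
 *	is_read_only(0x0325);		(now returns non-zero)
 */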
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	if (req->bh)
		req->bh->b_dirt = 0;
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
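/*
 * Added commentary: IN_ORDER (from the blk.h of this era, as best I
 * can tell) compares two requests by command, then device, then
 * sector, so the insertion loop above keeps each queue sorted in
 * ascending order per sweep: the classic one-way elevator.  A request
 * that sorts below the current head simply waits for the next sweep.
 */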
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;
/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}
/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
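/*
 * Worked example (added commentary): with NR_REQUEST = 64, the value
 * used in blk.h of this era, writes may only claim the first
 * (64*2)/3 = 42 entries of all_requests[], leaving 22 slots that
 * get_request() will hand out to reads alone.
 */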
/* big loop: look for a free request. */

repeat:
	cli();
/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
	if ((major == HD_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 254)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				bh->b_dirt = 0;
				sti();
				return;
			}
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 254)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				bh->b_dirt = 0;
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}
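/*
 * Worked example (added commentary): suppose a queued request covers
 * sectors 100-101 (sector = 100, nr_sectors = 2).  A buffer for
 * sector 102 matches the first test (100 + 2 == 102) and is chained
 * onto bhtail; a buffer for sectors 98-99 matches the second test
 * (100 - 2 == 98) and becomes the new head of the request.  Either
 * way no request slot is consumed and the drive sees one contiguous
 * transfer.
 */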
/* find an unused request. */
	req = get_request(max_req, bh->b_dev);
/* if no request available: if rw_ahead, forget it; otherwise try again. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}
/* we found a request. */
	sti();
/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
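/*
 * Added commentary: "major+blk_dev" is plain pointer arithmetic on
 * the blk_dev[] table, exactly equivalent to &blk_dev[major]; the
 * same idiom reappears in ll_rw_page() and ll_rw_swap_file() below.
 */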
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();
/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;
	req->bh = NULL;
	req->next = NULL;
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	schedule();
}
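/*
 * Added commentary: req->waiting was set to current and the task put
 * in TASK_SWAPPING before add_request(), so the schedule() above
 * blocks the caller until the driver finishes the transfer and wakes
 * the task recorded in req->waiting (the driver's end_request logic
 * is expected to do this).
 */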
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;
	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};
	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}
	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}
	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}
	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}
	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip. */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
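/*
 * Added commentary: plug.dev is -1 so no driver mistakes the dummy
 * for a real transfer; its only job is to make dev->current_request
 * non-NULL, which keeps add_request() from starting the device
 * through its empty-queue path while the loop below is still
 * queueing blocks.
 */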
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;
      sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
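/*
 * A minimal usage sketch (added, not from this file), roughly what
 * bread() in fs/buffer.c does for a single block; dev and block are
 * assumed valid, and getblk() is the buffer-cache allocator of this
 * era:
 *
 *	struct buffer_head * bh = getblk(dev, block, 1024);
 *	if (!bh->b_uptodate) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 */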
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	int buffersize;
	struct request * req;
	unsigned int major = MAJOR(dev);
	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}
	buffersize = PAGE_SIZE / nb;
	for (i=0; i<nb; i++, buf += buffersize)
	{
		cli();
		req = get_request_wait(NR_REQUEST, dev);
		sti();
		req->cmd = rw;
		req->errors = 0;
		req->sector = (b[i] * buffersize) >> 9;
		req->nr_sectors = buffersize >> 9;
		req->current_nr_sectors = buffersize >> 9;
		req->buffer = buf;
		req->waiting = current;
		req->bh = NULL;
		req->next = NULL;
		current->state = TASK_UNINTERRUPTIBLE;
		add_request(major+blk_dev,req);
		schedule();
	}
}
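/*
 * Worked example (added commentary): with PAGE_SIZE = 4096 and
 * nb = 4, buffersize is 1024 bytes, so swap slot b[i] = 10 starts at
 * byte 10 * 1024 = 10240, i.e. sector (10 * 1024) >> 9 = 20, and
 * each request covers 1024 >> 9 = 2 sectors.
 */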
long blk_dev_init(long mem_start, long mem_end)
{
	struct request * req;
	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->dev = -1;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
	mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
	mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
	mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
	mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
	mem_start = sbpcd_init(mem_start, mem_end);
#endif
	if (ramdisk_size)
		mem_start += rd_init(mem_start, ramdisk_size*1024);
	return mem_start;
}