/*
 *  linux/kernel/blk_dev/ll_rw.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"

#ifdef CONFIG_SBPCD
extern u_long sbpcd_init(u_long, u_long);
#endif /* CONFIG_SBPCD */

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk. */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *	do_request-address
 *	next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* no_dev */
	{ NULL, NULL },		/* dev mem */
	{ NULL, NULL },		/* dev fd */
	{ NULL, NULL },		/* dev hd */
	{ NULL, NULL },		/* dev ttyx */
	{ NULL, NULL },		/* dev tty */
	{ NULL, NULL },		/* dev lp */
	{ NULL, NULL },		/* dev pipes */
	{ NULL, NULL },		/* dev sd */
	{ NULL, NULL }		/* dev st */
};
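
/*
 * Illustrative note (a sketch, not part of this file): a block driver
 * makes itself known by filling in its slot at init time, roughly
 *
 *	blk_dev[MY_MAJOR].request_fn = do_my_request;
 *
 * where MY_MAJOR and do_my_request are hypothetical names; the actual
 * hookup happens inside the driver *_init() calls chained together in
 * blk_dev_init() at the bottom of this file.
 */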

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
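
/*
 * Illustrative sketch (hypothetical names, not in the original file): a
 * driver with two minors of 40 MB and 20 MB would register
 *
 *	static int my_sizes[2] = { 40960, 20480 };	(in 1 KB units)
 *	blk_size[MY_MAJOR] = my_sizes;
 *
 * after which make_request() below refuses any access that would run
 * past the end of a minor device.
 */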

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	/* scan backwards, wrapping at the start of the array and beginning
	 * where the previous search left off; a free slot has dev < 0 */
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->dev = dev;
	return req;
}

/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */
static inline struct request * get_request_wait(int n, int dev)
{
	register struct request *req;

	while ((req = get_request(n, dev)) == NULL)
		sleep_on(&wait_for_request);
	return req;
}
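
/*
 * Typical calling pattern (as used by ll_rw_page() and ll_rw_swap_file()
 * below): disable interrupts around the allocation, then fill in the
 * request with interrupts enabled again:
 *
 *	cli();
 *	req = get_request_wait(NR_REQUEST, dev);
 *	sti();
 */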

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
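
/*
 * Worked example of the bit layout above: each of the 8 longs per major
 * holds the read-only flags of 32 minors, so minor 40 lands in
 * ro_bits[major][40 >> 5] = ro_bits[major][1], at bit (40 & 31) = 8.
 */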

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	if (req->bh)
		req->bh->b_dirt = 0;
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* elevator insertion: keep the queue sorted by IN_ORDER so the
	 * disk head sweeps across the platter instead of seeking back
	 * and forth */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
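
/*
 * make_request() turns a single buffer_head into block I/O: it first tries
 * to merge the buffer into a request already queued for the same device
 * (appending when the buffer starts at req->sector + req->nr_sectors, or
 * prepending when it ends exactly at req->sector), and only takes a fresh
 * request slot when no merge is possible.
 */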

static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
	cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
	if ((major == HD_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/* the hd driver may be working on the head entry, so never
		   touch it: start merging at the second request */
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 254)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				bh->b_dirt = 0;
				sti();
				return;
			}

			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 254)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				bh->b_dirt = 0;
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

/* we found a request. */
	sti();

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
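
/*
 * ll_rw_page() reads or writes one page (8 sectors of 512 bytes) to or
 * from a block device, synchronously: the caller is put in TASK_SWAPPING
 * and the request carries req->waiting = current, so the schedule() call
 * below only returns to the caller once the I/O completion wakes it up.
 */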

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();
/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;
	req->bh = NULL;
	req->next = NULL;
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	schedule();
}

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
				correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */

	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

      sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
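
/*
 * Illustrative use (a sketch, not code from this file): the buffer cache
 * reads a single block roughly like
 *
 *	struct buffer_head * bh = getblk(dev, block, size);
 *	if (!bh->b_uptodate) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 *
 * which is essentially the pattern used by bread() in fs/buffer.c.
 */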

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	int buffersize;
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;

	for (i=0; i<nb; i++, buf += buffersize)
	{
		cli();
		req = get_request_wait(NR_REQUEST, dev);
		sti();
		req->cmd = rw;
		req->errors = 0;
		req->sector = (b[i] * buffersize) >> 9;
		req->nr_sectors = buffersize >> 9;
		req->current_nr_sectors = buffersize >> 9;
		req->buffer = buf;
		req->waiting = current;
		req->bh = NULL;
		req->next = NULL;
		current->state = TASK_UNINTERRUPTIBLE;
		add_request(major+blk_dev,req);
		schedule();
	}
}
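
/*
 * blk_dev_init() marks every request slot free (dev = -1), clears the
 * read-only bitmap, and then runs the init routine of each configured
 * driver; every *_init() returns the new start of free memory, so the
 * calls chain mem_start through one another.
 */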

long blk_dev_init(long mem_start, long mem_end)
{
	struct request * req;

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->dev = -1;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
	mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
	mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
	mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
	mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
	mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
	if (ramdisk_size)
		mem_start += rd_init(mem_start, ramdisk_size*1024);
	return mem_start;
}