/* i386/linux-2.3.21/drivers/ap1000/ddv.c */
/*
 * Copyright 1996 The Australian National University.
 * Copyright 1996 Fujitsu Laboratories Limited
 *
 * This software may be distributed under the terms of the Gnu
 * Public License version 2 or later
 */

/*
 * ddv.c - Single AP1000 block driver.
 *
 * This block driver performs I/O operations to the ddv option
 * board. (Hopefully:)
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <asm/ap1000/apreg.h>
#include <asm/ap1000/DdvReqTable.h>

#define MAJOR_NR DDV_MAJOR

#include <linux/blk.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>

#define DDV_DEBUG 0
#define AIR_DISK 1

#define SECTOR_SIZE 512

/* we can have lots of partitions */
#define PARTN_BITS 6
#define NUM_DDVDEVS (1<<PARTN_BITS)

#define PARDISK_BASE (1<<5)	/* partitions above this number are
				   striped across all the cells */
#define STRIPE_SHIFT 6
#define STRIPE_SECTORS (1<<STRIPE_SHIFT) /* number of sectors per stripe */

#define MAX_BNUM 16
#define MAX_REQUEST (TABLE_SIZE - 2)
#define REQUEST_LOW 16
#define REQUEST_HIGH 4
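/*
 * Request batching/throttling tunables used by ddv_request1() below:
 * MAX_BNUM bounds how many contiguous requests are merged into one
 * option-board request, MAX_REQUEST bounds the outstanding entries in
 * the shared request table, and REQUEST_LOW/REQUEST_HIGH throttle how
 * eagerly queued work is pushed to the board.
 */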
/* we fake up a block size larger than the physical block size to try
   to make things a bit more efficient */
#define SECTOR_BLOCK_SHIFT 9

#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)

/* try to read ahead a bit */
#define DDV_READ_AHEAD 64

static int have_ddv_board = 1;
static unsigned num_options = 0;
static unsigned this_option = 0;

extern int ddv_get_mlist(unsigned mptr[],int bnum);
extern int ddv_set_request(struct request *req,
			   int request_type,int bnum,int mlist,int len,int offset);
extern void ddv_load_kernel(char *opcodep);
extern int ddv_restart_cpu(void);
extern int ddv_mlist_available(void);
static int ddv_revalidate(kdev_t dev, struct gendisk *gdev);
static void ddv_geninit(struct gendisk *ignored);
static void ddv_release(struct inode * inode, struct file * filp);
static void ddv_request1(void);

static char *ddv_opcodep = NULL;
static struct request *next_request = NULL;

static DECLARE_WAIT_QUEUE_HEAD(busy_wait);

static int ddv_blocksizes[NUM_DDVDEVS];	/* in bytes */
int ddv_sect_length[NUM_DDVDEVS];	/* in sectors */
int ddv_blk_length[NUM_DDVDEVS];	/* in blocks */

/* these are used by the ddv_daemon, which services remote disk requests */
static struct remote_request *rem_queue = NULL;
static struct remote_request *rem_queue_end;
static DECLARE_WAIT_QUEUE_HEAD(ddv_daemon_wait);

static int opiu_kernel_loaded = 0;

static struct {
	unsigned reads, writes, blocks, rq_started, rq_finished, errors;
	unsigned sectors_read, sectors_written;
} ddv_stats;

static struct hd_struct partition_tables[NUM_DDVDEVS];

static struct gendisk ddv_gendisk = {
	MAJOR_NR,	 /* Major number */
	DEVICE_NAME,	 /* Major name */
	PARTN_BITS,	 /* Bits to shift to get real from partition */
	1 << PARTN_BITS, /* Number of partitions per real */
	1,		 /* maximum number of real */
#ifdef MODULE
	NULL,		 /* called from init_module */
#else
	ddv_geninit,	 /* init function */
#endif
	partition_tables,/* hd struct */
	ddv_blk_length,	 /* block sizes */
	1,		 /* number */
	(void *) NULL,	 /* internal */
	NULL		 /* next */
};

struct ddv_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	unsigned long start;
};

static struct ddv_geometry ddv_geometry;

struct remote_request {
	union {
		struct remote_request *next;
		void (*fn)(void);
	} u;
	unsigned bnum;		/* how many blocks does this contain */
	struct request *reqp;	/* pointer to the request on the original cell */
	unsigned cell;		/* what cell is the request from */
	struct request req;	/* details of the request */
};

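/*
 * Map the ddv option board in via the MSC option-address register and
 * bring it out of reset.  The magic constants here are board specific;
 * this is presumably the standard AP1000 option-slot setup sequence.
 */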
static void ddv_set_optadr(void)
{
	unsigned addr = 0x11000000;
	OPT_IO(OBASE) = addr;
	MSC_IO(MSC_OPTADR) =
		((addr & 0xff000000)>>16) |
		((OPTION_BASE & 0xf0000000)>>24) |
		((OPTION_BASE + 0x10000000)>>28);
	OPT_IO(PRST) = 0;
}

extern struct RequestTable *RTable;
extern struct OPrintBufArray *PrintBufs;
extern struct OAlignBufArray *AlignBufs;
extern struct DiskInfo *DiskInfo;

static void ddv_release(struct inode * inode, struct file * filp)
{
#if DDV_DEBUG
	printk("ddv_release started\n");
#endif
	sync_dev(inode->i_rdev);
#if DDV_DEBUG
	printk("ddv_release done\n");
#endif
}

static unsigned in_request = 0;
static unsigned req_queued = 0;

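/*
 * Complete one buffer_head worth of a request, waking any waiter on the
 * request's semaphore and the global wait_for_request queue.  This is
 * closely modelled on the generic end_request() helper, with per-driver
 * error and throughput accounting added via ddv_stats.
 */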
static void ddv_end_request(int uptodate,struct request *req)
{
	struct buffer_head * bh;

	ddv_stats.rq_finished++;

	/* printk("ddv_end_request(%d,%p)\n",uptodate,req); */

	req->errors = 0;
	if (!uptodate) {
		printk("end_request: I/O error, dev %s, sector %lu\n",
		       kdevname(req->rq_dev), req->sector);
		req->nr_sectors--;
		req->nr_sectors &= ~SECTOR_MASK;
		req->sector += (BLOCK_SIZE / SECTOR_SIZE);
		req->sector &= ~SECTOR_MASK;
		ddv_stats.errors++;
	}

	if ((bh = req->bh) != NULL) {
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		mark_buffer_uptodate(bh, uptodate);
		unlock_buffer(bh);
		if ((bh = req->bh) != NULL) {
			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			printk("WARNING: ddv: more sectors!\n");
			ddv_stats.errors++;
			return;
		}
	}

	if (req->sem != NULL)
		up(req->sem);
	req->rq_status = RQ_INACTIVE;
	wake_up(&wait_for_request);
}

/* check that a request is all OK to process */
static int request_ok(struct request *req)
{
	int minor;

	if (!req) return 0;

	if (MAJOR(req->rq_dev) != MAJOR_NR)
		panic(DEVICE_NAME ": bad major number\n");
	if (!buffer_locked(req->bh))
		panic(DEVICE_NAME ": block not locked");

	minor = MINOR(req->rq_dev);
	if (minor >= NUM_DDVDEVS) {
		printk("ddv_request: Invalid minor (%d)\n", minor);
		return 0;
	}

	if ((req->sector + req->current_nr_sectors) > ddv_sect_length[minor]) {
		printk("ddv: out of range minor=%d offset=%d len=%d sect_length=%d\n",
		       minor,(int)req->sector,(int)req->current_nr_sectors,
		       ddv_sect_length[minor]);
		return 0;
	}

	if (req->cmd != READ && req->cmd != WRITE) {
		printk("unknown request type %d\n",req->cmd);
		return 0;
	}

	/* it seems to be OK */
	return 1;
}

static void complete_request(struct request *req,int bnum)
{
	while (bnum--) {
		ddv_end_request(1,req);
		req = req->next;
	}
}

static int completion_pointer = 0;

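/*
 * Reap finished requests from the shared request table.  Entries between
 * completion_pointer and the table's ddv_pointer whose status has gone
 * back to DDV_REQ_FREE are presumed to have been completed by the option
 * board; the request pointer stashed in argv[7] is completed here.
 */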
static void check_completion(void)
{
	int i,bnum;
	struct request *req;

	if (!RTable) return;

	for (;
	     (i=completion_pointer) != RTable->ddv_pointer &&
	     RTable->async_info[i].status == DDV_REQ_FREE;
	     completion_pointer = INC_T(completion_pointer)) {
		req = (struct request *)RTable->async_info[i].argv[7];
		bnum = RTable->async_info[i].bnum;
		if (!req || !bnum) {
			printk("%s(%d)\n",__FILE__,__LINE__);
			ddv_stats.errors++;
			continue;
		}

		RTable->async_info[i].status = 0;
		RTable->async_info[i].argv[7] = 0;

		complete_request(req,bnum);
		in_request--;
	}
}

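/*
 * Pull every active, valid request off the device's CURRENT queue and
 * append it to our own private list (oldq).  Bad requests are failed
 * immediately; CURRENT is left pointing at the first request we could
 * not take.
 */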
static struct request *get_request_queue(struct request *oldq)
{
	struct request *req,*req2;

	/* skip any non-active or bad requests */
skip1:
	if (!(req = CURRENT))
		return oldq;

	if (req->rq_status != RQ_ACTIVE) {
		CURRENT = req->next;
		goto skip1;
	}

	if (!request_ok(req)) {
		ddv_end_request(0,req);
		CURRENT = req->next;
		goto skip1;
	}

	/* now grab as many as we can */
	req_queued++;

	for (req2 = req;
	     req2->next &&
	     req2->next->rq_status == RQ_ACTIVE &&
	     request_ok(req2->next);
	     req2 = req2->next)
		req_queued++;

	/* leave CURRENT pointing at the bad ones */
	CURRENT = req2->next;

	/* chop our list at that point */
	req2->next = NULL;

	if (!oldq)
		return req;

	for (req2=oldq;req2->next;req2=req2->next) ;

	req2->next = req;

	return oldq;
}

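/*
 * Completion path for a request that was serviced by another cell.  The
 * servicing cell RPCs the remote_request back to the originating cell
 * (u.fn appears to be the tnet_rpc dispatch hook), and we then complete
 * the saved request chain and try to issue more work.
 */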
static void ddv_rem_complete(struct remote_request *rem)
{
	unsigned flags;
	int bnum = rem->bnum;
	struct request *req = rem->reqp;

	complete_request(req,bnum);
	in_request--;

	save_flags(flags); cli();
	ddv_request1();
	restore_flags(flags);
}

/*
 * The background ddv daemon. This receives remote disk requests
 * and processes them via the normal block operations
 */
static int ddv_daemon(void *unused)
{
	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "ddv_daemon");
	spin_lock_irq(&current->sigmask_lock);
	sigfillset(&current->blocked); /* block all signals */
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	/* Give it a realtime priority. */
	current->policy = SCHED_FIFO;
	current->priority = 32;	/* Fixme --- we need to standardise our
				   namings for POSIX.4 realtime scheduling
				   priorities. */

	printk("Started ddv_daemon\n");

	while (1) {
		struct remote_request *rem;
		unsigned flags;
		struct buffer_head *bhlist[MAX_BNUM*4];
		int i,j,minor,len,shift,offset;

		save_flags(flags); cli();

		while (!rem_queue) {
			spin_lock_irq(&current->sigmask_lock);
			flush_signals(current);
			spin_unlock_irq(&current->sigmask_lock);
			interruptible_sleep_on(&ddv_daemon_wait);
		}

		rem = rem_queue;
		rem_queue = rem->u.next;
		restore_flags(flags);

		minor = MINOR(rem->req.rq_dev);
		len = rem->req.current_nr_sectors;
		offset = rem->req.sector;

		/* work out the conversion to the local block size from
		   sectors */
		for (shift=0;
		     (SECTOR_SIZE<<shift) != ddv_blocksizes[minor];
		     shift++) ;

		/* do the request */
		for (i=0; len; i++) {
			bhlist[i] = getblk(rem->req.rq_dev,
					   offset >> shift,
					   ddv_blocksizes[minor]);
			if (!buffer_uptodate(bhlist[i]))
				ll_rw_block(READ,1,&bhlist[i]);
			offset += 1<<shift;
			len -= 1<<shift;
		}

		for (j=0;j<i;j++)
			if (!buffer_uptodate(bhlist[j]))
				wait_on_buffer(bhlist[j]);

		/* put() the data */

		/* release the buffers */
		for (j=0;j<i;j++)
			brelse(bhlist[j]);

		/* tell the originator that it's done */
		rem->u.fn = ddv_rem_complete;
		tnet_rpc(rem->cell,rem,sizeof(int)*3,1);
	}
}

/* receive a remote disk request */
static void ddv_rem_queue(char *data,unsigned size)
{
	unsigned flags;
	struct remote_request *rem = (struct remote_request *)
		kmalloc(size,GFP_ATOMIC);

	if (!rem) {
		/* oh bugger! */
		ddv_stats.errors++;
		return;
	}

	memcpy(rem,data,size);
	rem->u.next = NULL;

	save_flags(flags); cli();

	/* add it to our remote request queue */
	if (!rem_queue)
		rem_queue = rem;
	else
		rem_queue_end->u.next = rem;
	rem_queue_end = rem;

	restore_flags(flags);

	wake_up(&ddv_daemon_wait);
}

/* which disk should this request go to */
static inline unsigned pardisk_num(struct request *req)
{
	int minor = MINOR(req->rq_dev);
	unsigned stripe;
	unsigned cell;

	if (minor < PARDISK_BASE)
		return this_option;

	stripe = req->sector >> STRIPE_SHIFT;
	cell = stripe % num_options;

	return cell;
}

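/*
 * Example (illustrative numbers only): with STRIPE_SHIFT == 6 a stripe is
 * 64 sectors, so on a 4-cell machine sector 300 of a striped partition
 * lies in stripe 4 (300 >> 6) and is serviced by cell 4 % 4 == 0.
 */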
/* check if a 2nd request can be tacked onto the first */
static inline int contiguous(struct request *req1,struct request *req2)
{
	if (req2->cmd != req1->cmd ||
	    req2->rq_dev != req1->rq_dev ||
	    req2->sector != req1->sector + req1->current_nr_sectors ||
	    req2->current_nr_sectors != req1->current_nr_sectors)
		return 0;
	if (pardisk_num(req1) != pardisk_num(req2))
		return 0;
	return 1;
}

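/*
 * Main request engine.  Batches runs of contiguous requests (up to the
 * space available in the mlist), accounts statistics, then either queues
 * the batch in the shared request table for the local option board or
 * forwards it to the owning cell with tnet_rpc() for striped partitions.
 */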
static void ddv_request1(void)
{
	struct request *req,*req1,*req2;
	unsigned offset,len,req_num,mlist,bnum,available=0;
	static unsigned mptrs[MAX_BNUM];
	unsigned cell;

	if (in_request > REQUEST_HIGH)
		return;

	next_request = get_request_queue(next_request);

	while ((req = next_request)) {
		int minor;

		if (in_request >= MAX_REQUEST)
			return;

		if (in_request>1 && req_queued<REQUEST_LOW)
			return;

		/* make sure we have room for a request */
		available = ddv_mlist_available();
		if (available < 1) return;
		if (available > MAX_BNUM)
			available = MAX_BNUM;

		offset = req->sector;
		len = req->current_nr_sectors;
		minor = MINOR(req->rq_dev);

		mptrs[0] = (int)req->buffer;

		for (bnum=1,req1=req,req2=req->next;
		     req2 && bnum<available && contiguous(req1,req2);
		     req1=req2,req2=req2->next) {
			mptrs[bnum++] = (int)req2->buffer;
		}

		next_request = req2;

		req_queued -= bnum;
		ddv_stats.blocks += bnum;
		ddv_stats.rq_started += bnum;

		if (req->cmd == READ) {
			ddv_stats.reads++;
			ddv_stats.sectors_read += len*bnum;
		} else {
			ddv_stats.writes++;
			ddv_stats.sectors_written += len*bnum;
		}

		if (minor >= PARDISK_BASE) {
			/* translate the request to the normal partition */
			unsigned stripe;
			minor -= PARDISK_BASE;

			stripe = offset >> STRIPE_SHIFT;
			stripe /= num_options;
			offset = (stripe << STRIPE_SHIFT) +
				(offset & ((1<<STRIPE_SHIFT)-1));
#if AIR_DISK
			/* like an air-guitar :-) */
			complete_request(req,bnum);
			continue;
#endif
		}

		if ((cell=pardisk_num(req)) != this_option) {
			/* it's a remote request */
			struct remote_request *rem;
			unsigned *remlist;
			unsigned size = sizeof(*rem) + sizeof(int)*bnum;

			rem = (struct remote_request *)kmalloc(size,GFP_ATOMIC);
			if (!rem) {
				/* hopefully we can get it on the next go */
				return;
			}
			remlist = (unsigned *)(rem+1);

			rem->u.fn = ddv_rem_queue;
			rem->cell = this_option;
			rem->bnum = bnum;
			rem->req = *req;
			rem->reqp = req;
			rem->req.rq_dev = MKDEV(MAJOR_NR,minor);
			rem->req.sector = offset;
			memcpy(remlist,mptrs,sizeof(mptrs[0])*bnum);

			if (tnet_rpc(cell,rem,size,1) != 0) {
				kfree_s(rem,size);
				return;
			}
		} else {
			/* it's a local request */
			if ((mlist = ddv_get_mlist(mptrs,bnum)) == -1) {
				ddv_stats.errors++;
				panic("ddv: mlist corrupted");
			}

			req_num = RTable->cell_pointer;
			RTable->async_info[req_num].status =
				req->cmd==READ?DDV_RAWREAD_REQ:DDV_RAWWRITE_REQ;
			RTable->async_info[req_num].bnum = bnum;
			RTable->async_info[req_num].argv[0] = mlist;
			RTable->async_info[req_num].argv[1] = len;
			RTable->async_info[req_num].argv[2] = offset +
				partition_tables[minor].start_sect;
			RTable->async_info[req_num].argv[3] = bnum;
			RTable->async_info[req_num].argv[7] = (unsigned)req;
			RTable->cell_pointer = INC_T(RTable->cell_pointer);
		}

		in_request++;
	}
}

static void ddv_request(void)
{
	cli();
	ddv_request1();
	sti();
}

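/*
 * Drain the option processor's printk ring: each entry holds a format
 * string (stored as an option-bus address, hence the OPIBUS_BASE offset)
 * and up to six arguments which are handed straight to printk().
 */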
static void check_printbufs(void)
{
	int i;

	if (!PrintBufs) return;

	while (PrintBufs->option_counter != PrintBufs->cell_counter) {
		i = PrintBufs->cell_counter;
		printk("opiu (%d): ",i);
		if (((unsigned)PrintBufs->bufs[i].fmt) > 0x100000)
			printk("Error: bad format in printk at %p\n",
			       PrintBufs->bufs[i].fmt);
		else
			printk(PrintBufs->bufs[i].fmt + OPIBUS_BASE,
			       PrintBufs->bufs[i].args[0],
			       PrintBufs->bufs[i].args[1],
			       PrintBufs->bufs[i].args[2],
			       PrintBufs->bufs[i].args[3],
			       PrintBufs->bufs[i].args[4],
			       PrintBufs->bufs[i].args[5]);
		if (++PrintBufs->cell_counter == PRINT_BUFS)
			PrintBufs->cell_counter = 0;
	}
}

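/*
 * Option-board interrupt handler: the IRC1 write presumably acknowledges
 * the interrupt; we then flush any pending opiu printk output, reap
 * completed requests and push more work to the board.
 */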
static void ddv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	save_flags(flags); cli();
	OPT_IO(IRC1) = 0x80000000;

	check_printbufs();
	check_completion();

	ddv_request1();
	restore_flags(flags);
}

static int ddv_open(struct inode * inode, struct file * filp)
{
	int minor = MINOR(inode->i_rdev);

	if (!have_ddv_board || minor >= NUM_DDVDEVS)
		return -ENODEV;

	if (minor >= PARDISK_BASE) {
		ddv_sect_length[minor] = ddv_sect_length[minor - PARDISK_BASE];
		ddv_blk_length[minor] = ddv_blk_length[minor - PARDISK_BASE];
	}

	return 0;
}

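/*
 * BIF reply handler for REQ_DDVOPEN: the front end sends back the opiu
 * kernel image, which is buffered here for ddv_load_opiu() to load.  A
 * zero-sized reply is taken to mean there is no ddv board present.
 */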
static void ddv_open_reply(struct cap_request *creq)
{
	int size = creq->size - sizeof(*creq);
	ddv_opcodep = (char *)kmalloc(size,GFP_ATOMIC);
	read_bif(ddv_opcodep, size);
#if DDV_DEBUG
	printk("received opiu kernel of size %d\n",size);
#endif
	if (size == 0)
		have_ddv_board = 0;
	wake_up(&busy_wait);
}

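/*
 * Load the opiu (option processor) kernel: ask the front end for the
 * image over the BIF, wait for ddv_open_reply(), download it with
 * ddv_load_kernel() and restart the option CPU, then size the disk and
 * the partition tables (including the striped pseudo-partitions).
 */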
static void ddv_load_opiu(void)
{
	int i;
	struct cap_request creq;

	/* if the opiu kernel is already loaded then we don't do anything */
	if (!have_ddv_board || opiu_kernel_loaded)
		return;

	bif_register_request(REQ_DDVOPEN,ddv_open_reply);

	/* send the open request to the front end */
	creq.cid = mpp_cid();
	creq.type = REQ_DDVOPEN;
	creq.header = 0;
	creq.size = sizeof(creq);

	bif_queue(&creq,0,0);

	ddv_set_optadr();

	while (!ddv_opcodep)
		sleep_on(&busy_wait);

	if (!have_ddv_board)
		return;

	ddv_load_kernel(ddv_opcodep);

	kfree(ddv_opcodep);
	ddv_opcodep = NULL;

	if (ddv_restart_cpu())
		return;

	ddv_sect_length[0] = DiskInfo->blocks;
	ddv_blk_length[0] = DiskInfo->blocks >> 1;
	ddv_blocksizes[0] = BLOCK_SIZE;

	ddv_geometry.cylinders = ddv_sect_length[0] /
		(ddv_geometry.heads*ddv_geometry.sectors);

	ddv_gendisk.part[0].start_sect = 0;
	ddv_gendisk.part[0].nr_sects = ddv_sect_length[0];

	resetup_one_dev(&ddv_gendisk, 0);

	for (i=0;i<PARDISK_BASE;i++) {
		ddv_sect_length[i] = ddv_gendisk.part[i].nr_sects;
		ddv_blk_length[i] = ddv_gendisk.part[i].nr_sects >> 1;
	}

	/* setup the parallel partitions by multiplying the normal
	   partition by the number of options */
	for (;i<NUM_DDVDEVS;i++) {
		ddv_sect_length[i] = ddv_sect_length[i-PARDISK_BASE]*num_options;
		ddv_blk_length[i] = ddv_blk_length[i-PARDISK_BASE]*num_options;
		ddv_gendisk.part[i].start_sect = ddv_gendisk.part[i-PARDISK_BASE].start_sect;
		ddv_gendisk.part[i].nr_sects = ddv_sect_length[i];
	}

	opiu_kernel_loaded = 1;
}

/*
 * This routine is called to flush all partitions and partition tables
 * for a changed disk, and then re-read the new partition table.
 */
static int ddv_revalidate(kdev_t dev, struct gendisk *gdev)
{
	int target;
	int max_p;
	int start;
	int i;

	target = DEVICE_NR(dev);

	max_p = gdev->max_p;
	start = target << gdev->minor_shift;

	printk("ddv_revalidate dev=%d target=%d max_p=%d start=%d\n",
	       dev,target,max_p,start);

	for (i=max_p - 1; i >=0 ; i--) {
		int minor = start + i;
		kdev_t devi = MKDEV(gdev->major, minor);
		sync_dev(devi);
		invalidate_inodes(devi);
		invalidate_buffers(devi);
		gdev->part[minor].start_sect = 0;
		gdev->part[minor].nr_sects = 0;
	}

	ddv_sect_length[start] = DiskInfo->blocks;
	ddv_blk_length[start] = DiskInfo->blocks >> 1;

	gdev->part[start].nr_sects = ddv_sect_length[start];
	resetup_one_dev(gdev, target);

	printk("sect_length[%d]=%d blk_length[%d]=%d\n",
	       start,ddv_sect_length[start],
	       start,ddv_blk_length[start]);

	for (i=0;i<max_p;i++) {
		ddv_sect_length[start+i] = gdev->part[start+i].nr_sects;
		ddv_blk_length[start+i] = gdev->part[start+i].nr_sects >> 1;
		if (gdev->part[start+i].nr_sects)
			printk("partition[%d] start=%d length=%d\n",i,
			       (int)gdev->part[start+i].start_sect,
			       (int)gdev->part[start+i].nr_sects);
	}

	return 0;
}

static int ddv_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	int err;
	struct ddv_geometry *loc = (struct ddv_geometry *) arg;
	int dev;
	int minor = MINOR(inode->i_rdev);

	if ((!inode) || !(inode->i_rdev))
		return -EINVAL;
	dev = DEVICE_NR(inode->i_rdev);
#if DDV_DEBUG
	printk("ddv_ioctl: cmd=%x dev=%x minor=%d\n", cmd, dev, minor);
#endif
	switch (cmd) {
	case HDIO_GETGEO:
		printk("\tHDIO_GETGEO\n");
		if (!loc) return -EINVAL;
		if (put_user(ddv_geometry.heads, (char *) &loc->heads)) return -EFAULT;
		if (put_user(ddv_geometry.sectors, (char *) &loc->sectors)) return -EFAULT;
		if (put_user(ddv_geometry.cylinders, (short *) &loc->cylinders)) return -EFAULT;
		if (put_user(ddv_geometry.start, (long *) &loc->start)) return -EFAULT;
		return 0;

	case HDIO_GET_MULTCOUNT:
		printk("\tHDIO_GET_MULTCOUNT\n");
		return -EINVAL;

	case HDIO_GET_IDENTITY:
		printk("\tHDIO_GET_IDENTITY\n");
		return -EINVAL;

	case HDIO_GET_NOWERR:
		printk("\tHDIO_GET_NOWERR\n");
		return -EINVAL;

	case HDIO_SET_NOWERR:
		printk("\tHDIO_SET_NOWERR\n");
		return -EINVAL;

	case BLKRRPART:
		printk("\tBLKRRPART\n");
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return ddv_revalidate(inode->i_rdev,&ddv_gendisk);

	case BLKGETSIZE:	/* Return device size */
		if (put_user(ddv_sect_length[minor],(long *) arg)) return -EFAULT;
#if DDV_DEBUG
		printk("BLKGETSIZE gave %d\n",ddv_sect_length[minor]);
#endif
		return 0;

	default:
		printk("ddv_ioctl: Invalid cmd=%d(0x%x)\n", cmd, cmd);
		return -EINVAL;
	}
}

static struct file_operations ddv_fops = {
	NULL,			/* lseek - default */
	block_read,		/* read */
	block_write,		/* write */
	NULL,			/* readdir - bad */
	NULL,			/* poll */
	ddv_ioctl,		/* ioctl */
	NULL,			/* mmap */
	ddv_open,		/* open */
	NULL,			/* flush */
	ddv_release,
	block_fsync		/* fsync */
};

static void ddv_status(void)
{
	if (!have_ddv_board) {
		printk("no ddv board\n");
		return;
	}

	printk("\n"
	       "in_request %u  req_queued %u\n"
	       "MTable: start=%u end=%u\n"
	       "Requests: started=%u finished=%u\n"
	       "Requests: completion_pointer=%u ddv_pointer=%u cell_pointer=%u\n"
	       "PrintBufs: option_counter=%u cell_counter=%u\n"
	       "ddv_stats: reads=%u writes=%u blocks=%u\n"
	       "ddv_stats: sectors_read=%u sectors_written=%u\n"
	       "CURRENT=%p next_request=%p errors=%u\n",
	       in_request,req_queued,
	       RTable->start_mtable,RTable->end_mtable,
	       ddv_stats.rq_started,ddv_stats.rq_finished,
	       completion_pointer,RTable->ddv_pointer,RTable->cell_pointer,
	       PrintBufs->option_counter,PrintBufs->cell_counter,
	       ddv_stats.reads,ddv_stats.writes,ddv_stats.blocks,
	       ddv_stats.sectors_read,ddv_stats.sectors_written,
	       CURRENT,next_request,
	       ddv_stats.errors);
}

int ddv_init(void)
{
	int cid;

	cid = mpp_cid();

	if (register_blkdev(MAJOR_NR,DEVICE_NAME,&ddv_fops)) {
		printk("ap: unable to get major %d for ap block dev\n",
		       MAJOR_NR);
		return -1;
	}

	printk("ddv_init: register dev %d\n", MAJOR_NR);
	blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
	read_ahead[MAJOR_NR] = DDV_READ_AHEAD;

	bif_add_debug_key('d',ddv_status,"DDV status");
	ddv_gendisk.next = gendisk_head;
	gendisk_head = &ddv_gendisk;

	num_options = mpp_num_cells();
	this_option = mpp_cid();

	kernel_thread(ddv_daemon, NULL, 0);

	return 0;
}

static void ddv_geninit(struct gendisk *ignored)
{
	int i;
	static int done = 0;

	if (done)
		printk("ddv_geninit already done!\n");

	done = 1;

	printk("ddv_geninit\n");

	/* request interrupt line 2 */
	if (request_irq(APOPT0_IRQ,ddv_interrupt,SA_INTERRUPT,"apddv",NULL)) {
		printk("Failed to install ddv interrupt handler\n");
	}

	for (i=0;i<NUM_DDVDEVS;i++) {
		ddv_blocksizes[i] = BLOCK_SIZE;
		ddv_sect_length[i] = 0;
		ddv_blk_length[i] = 0;
	}

	ddv_geometry.heads = 32;
	ddv_geometry.sectors = 32;
	ddv_geometry.cylinders = 1;
	ddv_geometry.start = 0;

	blksize_size[MAJOR_NR] = ddv_blocksizes;

	ddv_load_opiu();
}

/* loadable module support */

#ifdef MODULE

int init_module(void)
{
	int error = ddv_init();
	if (!error) {
		ddv_geninit(&(struct gendisk) { 0,0,0,0,0,0,0,0,0,0,0 });
		printk(KERN_INFO "DDV: Loaded as module.\n");
	}
	return error;
}

/* Before freeing the module, invalidate all of the protected buffers! */
void cleanup_module(void)
{
	int i;
	struct gendisk ** gdp;

	for (i = 0 ; i < NUM_DDVDEVS; i++)
		invalidate_buffers(MKDEV(MAJOR_NR, i));

	/* reset the opiu */
	OPT_IO(OPIU_OP) = OPIU_RESET;
	OPT_IO(PRST) = PRST_IRST;

	unregister_blkdev( MAJOR_NR, DEVICE_NAME );
	for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
		if (*gdp == &ddv_gendisk)
			break;
	if (*gdp)
		*gdp = (*gdp)->next;
	free_irq(APOPT0_IRQ, NULL);
	blk_dev[MAJOR_NR].request_fn = 0;
}

#endif /* MODULE */