/*
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"

// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);

// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);

// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open		= mraid_mm_open,
	.unlocked_ioctl	= mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mraid_mm_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "megadev0",
	.fops	= &lsi_fops,
};

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user	*argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD commands are issued through this interface;
	 * a MIMD application may still send a different command.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 * or return NULL if the free list is empty for the adapter
	 * (mraid_mmadp_t) passed to mraid_mm_alloc_kioc
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located @adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t __user	*umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}
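
/*
 * Illustrative sketch, not part of the driver: an old-style management
 * application reaches the two driver commands above through /dev/megadev0
 * with a mimd_t packet. The ioctl request number comes from the
 * application's own headers and is only assumed here to satisfy
 * _IOC_TYPE(cmd) == MEGAIOC_MAGIC; 'cmd' below is a placeholder.
 *
 *	mimd_t		mimd;
 *	uint32_t	count;
 *	int		fd, nadp;
 *
 *	fd = open("/dev/megadev0", O_RDONLY);	// needs CAP_SYS_ADMIN
 *	memset(&mimd, 0, sizeof(mimd));
 *	mimd.ui.fcs.opcode	= 0x82;
 *	mimd.ui.fcs.subopcode	= MEGAIOC_QNADAP;
 *	mimd.data		= (caddr_t)&count;
 *	nadp = ioctl(fd, cmd, &mimd);	// adapter count returned and also
 *					// copied to 'count'
 */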

/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */
static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in the above case, the beginning of the memory block is
	 * treated as a mailbox. The passthru will begin at the next 1K
	 * boundary, and the data will start 1K after that.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
					pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */
	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t)kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at run time and set the free_buf flag. We must free
		 * that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			dma_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct uioc_timeout	timeout = { };

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires	= jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}
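
/*
 * Illustrative sketch, not part of the driver: the low level driver's
 * issue_uioc() hook queues the packet to the firmware and returns; when the
 * firmware completes it, the LLD fills in kioc->status and invokes the
 * kioc->done callback set up by mraid_mm_ioctl(), which is ioctl_done()
 * below. The completion handler name (my_fw_completion) is a hypothetical
 * placeholder.
 *
 *	static void my_fw_completion(uioc_t *kioc, int fw_status)
 *	{
 *		kioc->status = fw_status ? -EIO : 0;	// anything but -ENODATA
 *		kioc->done(kioc);			// ends the wait in lld_ioctl()
 *	}
 */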

/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t	*adapter;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timedout before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc
	 */
	if (kioc->timedout) {
		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}

/**
 * lld_timedout	- callback from the expired timer
 * @t	: timer that timed out
 */
static void
lld_timedout(struct timer_list *t)
{
	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
	uioc_t *kioc	= timeout->uioc;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}

/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;

	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}

/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}

/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	uint32_t	rval;
	int		i;

	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;

	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures.
	 */
	adapter->kioc_list	= kmalloc_array(lld_adp->max_kioc,
						sizeof(uioc_t),
						GFP_KERNEL);
	adapter->mbox_list	= kmalloc_array(lld_adp->max_kioc,
						sizeof(mbox64_t),
						GFP_KERNEL);
	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
						&adapter->pdev->dev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list = (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	dma_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}
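
/*
 * Illustrative sketch, not part of the driver: a low level driver such as
 * megaraid_mbox registers with this module roughly as below. The field names
 * come from mraid_mmadp_t as copied above; the callback, private data and
 * limits (my_issue_uioc, my_softstate, my_unique_id, MY_TIMEOUT, MY_MAX_KIOC)
 * are hypothetical placeholders.
 *
 *	mraid_mmadp_t adp;
 *
 *	adp.unique_id	= my_unique_id;
 *	adp.drvr_type	= DRVRTYPE_MBOX;
 *	adp.drvr_data	= (unsigned long)my_softstate;
 *	adp.pdev	= pdev;
 *	adp.issue_uioc	= my_issue_uioc;
 *	adp.timeout	= MY_TIMEOUT;	// seconds, used as adp->timeout * HZ
 *	adp.max_kioc	= MY_MAX_KIOC;
 *
 *	if (mraid_mm_register_adp(&adp) != 0)
 *		goto fail;	// nothing remains registered on failure
 */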

/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given driver data, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}

/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp		: Adapter softstate
 *
 * We maintain a pool of dma buffers per adapter. Each pool has one
 * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
 * We have just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool etc.
 * We don't want to waste too much memory by allocating more buffers per
 * pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = dma_pool_create("megaraid mm data buffer",
						&adp->pdev->dev, bufsize,
						16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}

/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	dma_pool_destroy(adp->pthru_dma_pool);

	return;
}

/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				dma_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			dma_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init - Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}

#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit - Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1, ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */