i2c-eg20t: change timeout value 50msec to 1000msec
[zen-stable.git] / drivers / staging / sep / sep_driver.c
blob6b3d156d41407327137fd1a9977b16693b2a8172
1 /*
3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * CONTACTS:
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
26 * CHANGES:
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/miscdevice.h>
35 #include <linux/fs.h>
36 #include <linux/cdev.h>
37 #include <linux/kdev_t.h>
38 #include <linux/mutex.h>
39 #include <linux/sched.h>
40 #include <linux/mm.h>
41 #include <linux/poll.h>
42 #include <linux/wait.h>
43 #include <linux/pci.h>
44 #include <linux/firmware.h>
45 #include <linux/slab.h>
46 #include <linux/ioctl.h>
47 #include <asm/current.h>
48 #include <linux/ioport.h>
49 #include <linux/io.h>
50 #include <linux/interrupt.h>
51 #include <linux/pagemap.h>
52 #include <asm/cacheflush.h>
53 #include <linux/delay.h>
54 #include <linux/jiffies.h>
55 #include <linux/rar_register.h>
57 #include "sep_driver_hw_defs.h"
58 #include "sep_driver_config.h"
59 #include "sep_driver_api.h"
60 #include "sep_dev.h"
62 /*----------------------------------------
63 DEFINES
64 -----------------------------------------*/
66 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
68 /*--------------------------------------------
69 GLOBAL variables
70 --------------------------------------------*/
72 /* Keep this a single static object for now to keep the conversion easy */
74 static struct sep_device *sep_dev;
76 /**
77 * sep_dump_message - dump the message that is pending
78 * @sep: SEP device
80 static void sep_dump_message(struct sep_device *sep)
82 int count;
83 u32 *p = sep->shared_addr;
84 for (count = 0; count < 12 * 4; count += 4)
85 dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
86 count, *p++);
89 /**
90 * sep_map_and_alloc_shared_area - allocate shared block
91 * @sep: security processor
92 * @size: size of shared area
94 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
96 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
97 sep->shared_size,
98 &sep->shared_bus, GFP_KERNEL);
100 if (!sep->shared_addr) {
101 dev_warn(&sep->pdev->dev,
102 "shared memory dma_alloc_coherent failed\n");
103 return -ENOMEM;
105 dev_dbg(&sep->pdev->dev,
106 "shared_addr %zx bytes @%p (bus %llx)\n",
107 sep->shared_size, sep->shared_addr,
108 (unsigned long long)sep->shared_bus);
109 return 0;
113 * sep_unmap_and_free_shared_area - free shared block
114 * @sep: security processor
116 static void sep_unmap_and_free_shared_area(struct sep_device *sep)
118 dma_free_coherent(&sep->pdev->dev, sep->shared_size,
119 sep->shared_addr, sep->shared_bus);
123 * sep_shared_bus_to_virt - convert bus/virt addresses
124 * @sep: pointer to struct sep_device
125 * @bus_address: address to convert
127 * Returns virtual address inside the shared area according
128 * to the bus address.
130 static void *sep_shared_bus_to_virt(struct sep_device *sep,
131 dma_addr_t bus_address)
133 return sep->shared_addr + (bus_address - sep->shared_bus);
137 * open function for the singleton driver
138 * @inode_ptr struct inode *
139 * @file_ptr struct file *
141 * Called when the user opens the singleton device interface
143 static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
145 struct sep_device *sep;
148 * Get the SEP device structure and use it for the
149 * private_data field in filp for other methods
151 sep = sep_dev;
153 file_ptr->private_data = sep;
155 if (test_and_set_bit(0, &sep->singleton_access_flag))
156 return -EBUSY;
157 return 0;
161 * sep_open - device open method
162 * @inode: inode of SEP device
163 * @filp: file handle to SEP device
165 * Open method for the SEP device. Called when userspace opens
166 * the SEP device node.
168 * Returns zero on success otherwise an error code.
170 static int sep_open(struct inode *inode, struct file *filp)
172 struct sep_device *sep;
175 * Get the SEP device structure and use it for the
176 * private_data field in filp for other methods
178 sep = sep_dev;
179 filp->private_data = sep;
181 /* Anyone can open; locking takes place at transaction level */
182 return 0;
186 * sep_singleton_release - close a SEP singleton device
187 * @inode: inode of SEP device
188 * @filp: file handle being closed
190 * Called on the final close of a SEP device. As the open protects against
191 * multiple simultaenous opens that means this method is called when the
192 * final reference to the open handle is dropped.
194 static int sep_singleton_release(struct inode *inode, struct file *filp)
196 struct sep_device *sep = filp->private_data;
198 clear_bit(0, &sep->singleton_access_flag);
199 return 0;
203 * sep_request_daemon_open - request daemon open method
204 * @inode: inode of SEP device
205 * @filp: file handle to SEP device
207 * Open method for the SEP request daemon. Called when
208 * request daemon in userspace opens the SEP device node.
210 * Returns zero on success otherwise an error code.
212 static int sep_request_daemon_open(struct inode *inode, struct file *filp)
214 struct sep_device *sep = sep_dev;
215 int error = 0;
217 filp->private_data = sep;
219 /* There is supposed to be only one request daemon */
220 if (test_and_set_bit(0, &sep->request_daemon_open))
221 error = -EBUSY;
222 return error;
226 * sep_request_daemon_release - close a SEP daemon
227 * @inode: inode of SEP device
228 * @filp: file handle being closed
230 * Called on the final close of a SEP daemon.
232 static int sep_request_daemon_release(struct inode *inode, struct file *filp)
234 struct sep_device *sep = filp->private_data;
236 dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
237 current->pid);
239 /* Clear the request_daemon_open flag */
240 clear_bit(0, &sep->request_daemon_open);
241 return 0;
245 * sep_req_daemon_send_reply_command_handler - poke the SEP
246 * @sep: struct sep_device *
248 * This function raises interrupt to SEPm that signals that is has a
249 * new command from HOST
251 static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
253 unsigned long lck_flags;
255 sep_dump_message(sep);
257 /* Counters are lockable region */
258 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
259 sep->send_ct++;
260 sep->reply_ct++;
262 /* Send the interrupt to SEP */
263 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
264 sep->send_ct++;
266 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
268 dev_dbg(&sep->pdev->dev,
269 "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
270 sep->send_ct, sep->reply_ct);
272 return 0;
277 * sep_free_dma_table_data_handler - free DMA table
278 * @sep: pointere to struct sep_device
280 * Handles the request to free DMA table for synchronic actions
282 static int sep_free_dma_table_data_handler(struct sep_device *sep)
284 int count;
285 int dcb_counter;
286 /* Pointer to the current dma_resource struct */
287 struct sep_dma_resource *dma;
289 for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
290 dma = &sep->dma_res_arr[dcb_counter];
292 /* Unmap and free input map array */
293 if (dma->in_map_array) {
294 for (count = 0; count < dma->in_num_pages; count++) {
295 dma_unmap_page(&sep->pdev->dev,
296 dma->in_map_array[count].dma_addr,
297 dma->in_map_array[count].size,
298 DMA_TO_DEVICE);
300 kfree(dma->in_map_array);
303 /* Unmap output map array, DON'T free it yet */
304 if (dma->out_map_array) {
305 for (count = 0; count < dma->out_num_pages; count++) {
306 dma_unmap_page(&sep->pdev->dev,
307 dma->out_map_array[count].dma_addr,
308 dma->out_map_array[count].size,
309 DMA_FROM_DEVICE);
311 kfree(dma->out_map_array);
314 /* Free page cache for output */
315 if (dma->in_page_array) {
316 for (count = 0; count < dma->in_num_pages; count++) {
317 flush_dcache_page(dma->in_page_array[count]);
318 page_cache_release(dma->in_page_array[count]);
320 kfree(dma->in_page_array);
323 if (dma->out_page_array) {
324 for (count = 0; count < dma->out_num_pages; count++) {
325 if (!PageReserved(dma->out_page_array[count]))
326 SetPageDirty(dma->out_page_array[count]);
327 flush_dcache_page(dma->out_page_array[count]);
328 page_cache_release(dma->out_page_array[count]);
330 kfree(dma->out_page_array);
333 /* Reset all the values */
334 dma->in_page_array = NULL;
335 dma->out_page_array = NULL;
336 dma->in_num_pages = 0;
337 dma->out_num_pages = 0;
338 dma->in_map_array = NULL;
339 dma->out_map_array = NULL;
340 dma->in_map_num_entries = 0;
341 dma->out_map_num_entries = 0;
344 sep->nr_dcb_creat = 0;
345 sep->num_lli_tables_created = 0;
347 return 0;
351 * sep_request_daemon_mmap - maps the shared area to user space
352 * @filp: pointer to struct file
353 * @vma: pointer to vm_area_struct
355 * Called by the kernel when the daemon attempts an mmap() syscall
356 * using our handle.
358 static int sep_request_daemon_mmap(struct file *filp,
359 struct vm_area_struct *vma)
361 struct sep_device *sep = filp->private_data;
362 dma_addr_t bus_address;
363 int error = 0;
365 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
366 error = -EINVAL;
367 goto end_function;
370 /* Get physical address */
371 bus_address = sep->shared_bus;
373 if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
374 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
376 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
377 error = -EAGAIN;
378 goto end_function;
381 end_function:
382 return error;
386 * sep_request_daemon_poll - poll implementation
387 * @sep: struct sep_device * for current SEP device
388 * @filp: struct file * for open file
389 * @wait: poll_table * for poll
391 * Called when our device is part of a poll() or select() syscall
393 static unsigned int sep_request_daemon_poll(struct file *filp,
394 poll_table *wait)
396 u32 mask = 0;
397 /* GPR2 register */
398 u32 retval2;
399 unsigned long lck_flags;
400 struct sep_device *sep = filp->private_data;
402 poll_wait(filp, &sep->event_request_daemon, wait);
404 dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
405 sep->send_ct, sep->reply_ct);
407 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
408 /* Check if the data is ready */
409 if (sep->send_ct == sep->reply_ct) {
410 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
412 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
413 dev_dbg(&sep->pdev->dev,
414 "daemon poll: data check (GPR2) is %x\n", retval2);
416 /* Check if PRINT request */
417 if ((retval2 >> 30) & 0x1) {
418 dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
419 mask |= POLLIN;
420 goto end_function;
422 /* Check if NVS request */
423 if (retval2 >> 31) {
424 dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
425 mask |= POLLPRI | POLLWRNORM;
427 } else {
428 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
429 dev_dbg(&sep->pdev->dev,
430 "daemon poll: no reply received; returning 0\n");
431 mask = 0;
433 end_function:
434 return mask;
438 * sep_release - close a SEP device
439 * @inode: inode of SEP device
440 * @filp: file handle being closed
442 * Called on the final close of a SEP device.
444 static int sep_release(struct inode *inode, struct file *filp)
446 struct sep_device *sep = filp->private_data;
448 dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
450 mutex_lock(&sep->sep_mutex);
451 /* Is this the process that has a transaction open?
452 * If so, lets reset pid_doing_transaction to 0 and
453 * clear the in use flags, and then wake up sep_event
454 * so that other processes can do transactions
456 if (sep->pid_doing_transaction == current->pid) {
457 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
458 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
459 sep_free_dma_table_data_handler(sep);
460 wake_up(&sep->event);
461 sep->pid_doing_transaction = 0;
464 mutex_unlock(&sep->sep_mutex);
465 return 0;
469 * sep_mmap - maps the shared area to user space
470 * @filp: pointer to struct file
471 * @vma: pointer to vm_area_struct
473 * Called on an mmap of our space via the normal SEP device
475 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
477 dma_addr_t bus_addr;
478 struct sep_device *sep = filp->private_data;
479 unsigned long error = 0;
481 /* Set the transaction busy (own the device) */
482 wait_event_interruptible(sep->event,
483 test_and_set_bit(SEP_MMAP_LOCK_BIT,
484 &sep->in_use_flags) == 0);
486 if (signal_pending(current)) {
487 error = -EINTR;
488 goto end_function_with_error;
491 * The pid_doing_transaction indicates that this process
492 * now owns the facilities to performa a transaction with
493 * the SEP. While this process is performing a transaction,
494 * no other process who has the SEP device open can perform
495 * any transactions. This method allows more than one process
496 * to have the device open at any given time, which provides
497 * finer granularity for device utilization by multiple
498 * processes.
500 mutex_lock(&sep->sep_mutex);
501 sep->pid_doing_transaction = current->pid;
502 mutex_unlock(&sep->sep_mutex);
504 /* Zero the pools and the number of data pool alocation pointers */
505 sep->data_pool_bytes_allocated = 0;
506 sep->num_of_data_allocations = 0;
509 * Check that the size of the mapped range is as the size of the message
510 * shared area
512 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
513 error = -EINVAL;
514 goto end_function_with_error;
517 dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
519 /* Get bus address */
520 bus_addr = sep->shared_bus;
522 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
523 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
524 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
525 error = -EAGAIN;
526 goto end_function_with_error;
528 goto end_function;
530 end_function_with_error:
531 /* Clear the bit */
532 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
533 mutex_lock(&sep->sep_mutex);
534 sep->pid_doing_transaction = 0;
535 mutex_unlock(&sep->sep_mutex);
537 /* Raise event for stuck contextes */
539 wake_up(&sep->event);
541 end_function:
542 return error;
546 * sep_poll - poll handler
547 * @filp: pointer to struct file
548 * @wait: pointer to poll_table
550 * Called by the OS when the kernel is asked to do a poll on
551 * a SEP file handle.
553 static unsigned int sep_poll(struct file *filp, poll_table *wait)
555 u32 mask = 0;
556 u32 retval = 0;
557 u32 retval2 = 0;
558 unsigned long lck_flags;
560 struct sep_device *sep = filp->private_data;
562 /* Am I the process that owns the transaction? */
563 mutex_lock(&sep->sep_mutex);
564 if (current->pid != sep->pid_doing_transaction) {
565 dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
566 mask = POLLERR;
567 mutex_unlock(&sep->sep_mutex);
568 goto end_function;
570 mutex_unlock(&sep->sep_mutex);
572 /* Check if send command or send_reply were activated previously */
573 if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
574 mask = POLLERR;
575 goto end_function;
578 /* Add the event to the polling wait table */
579 dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
581 poll_wait(filp, &sep->event, wait);
583 dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
584 sep->send_ct, sep->reply_ct);
586 /* Check if error occurred during poll */
587 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
588 if (retval2 != 0x0) {
589 dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
590 mask |= POLLERR;
591 goto end_function;
594 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
596 if (sep->send_ct == sep->reply_ct) {
597 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
598 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
599 dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
600 retval);
602 /* Check if printf request */
603 if ((retval >> 30) & 0x1) {
604 dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
605 wake_up(&sep->event_request_daemon);
606 goto end_function;
609 /* Check if the this is SEP reply or request */
610 if (retval >> 31) {
611 dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
612 wake_up(&sep->event_request_daemon);
613 } else {
614 dev_dbg(&sep->pdev->dev, "poll: normal return\n");
615 /* In case it is again by send_reply_comand */
616 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
617 sep_dump_message(sep);
618 dev_dbg(&sep->pdev->dev,
619 "poll; SEP reply POLLIN | POLLRDNORM\n");
620 mask |= POLLIN | POLLRDNORM;
622 } else {
623 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
624 dev_dbg(&sep->pdev->dev,
625 "poll; no reply received; returning mask of 0\n");
626 mask = 0;
629 end_function:
630 return mask;
634 * sep_time_address - address in SEP memory of time
635 * @sep: SEP device we want the address from
637 * Return the address of the two dwords in memory used for time
638 * setting.
640 static u32 *sep_time_address(struct sep_device *sep)
642 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
646 * sep_set_time - set the SEP time
647 * @sep: the SEP we are setting the time for
649 * Calculates time and sets it at the predefined address.
650 * Called with the SEP mutex held.
652 static unsigned long sep_set_time(struct sep_device *sep)
654 struct timeval time;
655 u32 *time_addr; /* Address of time as seen by the kernel */
658 do_gettimeofday(&time);
660 /* Set value in the SYSTEM MEMORY offset */
661 time_addr = sep_time_address(sep);
663 time_addr[0] = SEP_TIME_VAL_TOKEN;
664 time_addr[1] = time.tv_sec;
666 dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
667 dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
668 dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
670 return time.tv_sec;
674 * sep_set_caller_id_handler - insert caller id entry
675 * @sep: SEP device
676 * @arg: pointer to struct caller_id_struct
678 * Inserts the data into the caller id table. Note that this function
679 * falls under the ioctl lock
681 static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
683 void __user *hash;
684 int error = 0;
685 int i;
686 struct caller_id_struct command_args;
688 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
689 if (sep->caller_id_table[i].pid == 0)
690 break;
693 if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
694 dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
695 dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
696 SEP_CALLER_ID_TABLE_NUM_ENTRIES);
697 error = -EUSERS;
698 goto end_function;
701 /* Copy the data */
702 if (copy_from_user(&command_args, (void __user *)arg,
703 sizeof(command_args))) {
704 error = -EFAULT;
705 goto end_function;
708 hash = (void __user *)(unsigned long)command_args.callerIdAddress;
710 if (!command_args.pid || !command_args.callerIdSizeInBytes) {
711 error = -EINVAL;
712 goto end_function;
715 dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
716 dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
717 command_args.callerIdSizeInBytes);
719 if (command_args.callerIdSizeInBytes >
720 SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
721 error = -EMSGSIZE;
722 goto end_function;
725 sep->caller_id_table[i].pid = command_args.pid;
727 if (copy_from_user(sep->caller_id_table[i].callerIdHash,
728 hash, command_args.callerIdSizeInBytes))
729 error = -EFAULT;
730 end_function:
731 return error;
735 * sep_set_current_caller_id - set the caller id
736 * @sep: pointer to struct_sep_device
738 * Set the caller ID (if it exists) to the SEP. Note that this
739 * function falls under the ioctl lock
741 static int sep_set_current_caller_id(struct sep_device *sep)
743 int i;
744 u32 *hash_buf_ptr;
746 /* Zero the previous value */
747 memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
748 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
750 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
751 if (sep->caller_id_table[i].pid == current->pid) {
752 dev_dbg(&sep->pdev->dev, "Caller Id found\n");
754 memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
755 (void *)(sep->caller_id_table[i].callerIdHash),
756 SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
757 break;
760 /* Ensure data is in little endian */
761 hash_buf_ptr = (u32 *)sep->shared_addr +
762 SEP_CALLER_ID_OFFSET_BYTES;
764 for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
765 hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
767 return 0;
771 * sep_send_command_handler - kick off a command
772 * @sep: SEP being signalled
774 * This function raises interrupt to SEP that signals that is has a new
775 * command from the host
777 * Note that this function does fall under the ioctl lock
779 static int sep_send_command_handler(struct sep_device *sep)
781 unsigned long lck_flags;
782 int error = 0;
784 if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
785 error = -EPROTO;
786 goto end_function;
788 sep_set_time(sep);
790 sep_set_current_caller_id(sep);
792 sep_dump_message(sep);
794 /* Update counter */
795 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
796 sep->send_ct++;
797 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
799 dev_dbg(&sep->pdev->dev,
800 "sep_send_command_handler send_ct %lx reply_ct %lx\n",
801 sep->send_ct, sep->reply_ct);
803 /* Send interrupt to SEP */
804 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
806 end_function:
807 return error;
811 * sep_allocate_data_pool_memory_handler -allocate pool memory
812 * @sep: pointer to struct sep_device
813 * @arg: pointer to struct alloc_struct
815 * This function handles the allocate data pool memory request
816 * This function returns calculates the bus address of the
817 * allocated memory, and the offset of this area from the mapped address.
818 * Therefore, the FVOs in user space can calculate the exact virtual
819 * address of this allocated memory
821 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
822 unsigned long arg)
824 int error = 0;
825 struct alloc_struct command_args;
827 /* Holds the allocated buffer address in the system memory pool */
828 u32 *token_addr;
830 if (copy_from_user(&command_args, (void __user *)arg,
831 sizeof(struct alloc_struct))) {
832 error = -EFAULT;
833 goto end_function;
836 /* Allocate memory */
837 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
838 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
839 error = -ENOMEM;
840 goto end_function;
843 dev_dbg(&sep->pdev->dev,
844 "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
845 dev_dbg(&sep->pdev->dev,
846 "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
847 /* Set the virtual and bus address */
848 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
849 sep->data_pool_bytes_allocated;
851 /* Place in the shared area that is known by the SEP */
852 token_addr = (u32 *)(sep->shared_addr +
853 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
854 (sep->num_of_data_allocations)*2*sizeof(u32));
856 token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
857 token_addr[1] = (u32)sep->shared_bus +
858 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
859 sep->data_pool_bytes_allocated;
861 /* Write the memory back to the user space */
862 error = copy_to_user((void *)arg, (void *)&command_args,
863 sizeof(struct alloc_struct));
864 if (error) {
865 error = -EFAULT;
866 goto end_function;
869 /* Update the allocation */
870 sep->data_pool_bytes_allocated += command_args.num_bytes;
871 sep->num_of_data_allocations += 1;
873 end_function:
874 return error;
878 * sep_lock_kernel_pages - map kernel pages for DMA
879 * @sep: pointer to struct sep_device
880 * @kernel_virt_addr: address of data buffer in kernel
881 * @data_size: size of data
882 * @lli_array_ptr: lli array
883 * @in_out_flag: input into device or output from device
885 * This function locks all the physical pages of the kernel virtual buffer
886 * and construct a basic lli array, where each entry holds the physical
887 * page address and the size that application data holds in this page
888 * This function is used only during kernel crypto mod calls from within
889 * the kernel (when ioctl is not used)
891 static int sep_lock_kernel_pages(struct sep_device *sep,
892 unsigned long kernel_virt_addr,
893 u32 data_size,
894 struct sep_lli_entry **lli_array_ptr,
895 int in_out_flag)
898 int error = 0;
899 /* Array of lli */
900 struct sep_lli_entry *lli_array;
901 /* Map array */
902 struct sep_dma_map *map_array;
904 dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
905 (unsigned long)kernel_virt_addr);
906 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
908 lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
909 if (!lli_array) {
910 error = -ENOMEM;
911 goto end_function;
913 map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
914 if (!map_array) {
915 error = -ENOMEM;
916 goto end_function_with_error;
919 map_array[0].dma_addr =
920 dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
921 data_size, DMA_BIDIRECTIONAL);
922 map_array[0].size = data_size;
926 * Set the start address of the first page - app data may start not at
927 * the beginning of the page
929 lli_array[0].bus_address = (u32)map_array[0].dma_addr;
930 lli_array[0].block_size = map_array[0].size;
932 dev_dbg(&sep->pdev->dev,
933 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
934 (unsigned long)lli_array[0].bus_address,
935 lli_array[0].block_size);
937 /* Set the output parameters */
938 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
939 *lli_array_ptr = lli_array;
940 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
941 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
942 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
943 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
944 } else {
945 *lli_array_ptr = lli_array;
946 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
947 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
948 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
949 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
951 goto end_function;
953 end_function_with_error:
954 kfree(lli_array);
956 end_function:
957 return error;
961 * sep_lock_user_pages - lock and map user pages for DMA
962 * @sep: pointer to struct sep_device
963 * @app_virt_addr: user memory data buffer
964 * @data_size: size of data buffer
965 * @lli_array_ptr: lli array
966 * @in_out_flag: input or output to device
968 * This function locks all the physical pages of the application
969 * virtual buffer and construct a basic lli array, where each entry
970 * holds the physical page address and the size that application
971 * data holds in this physical pages
973 static int sep_lock_user_pages(struct sep_device *sep,
974 u32 app_virt_addr,
975 u32 data_size,
976 struct sep_lli_entry **lli_array_ptr,
977 int in_out_flag)
980 int error = 0;
981 u32 count;
982 int result;
983 /* The the page of the end address of the user space buffer */
984 u32 end_page;
985 /* The page of the start address of the user space buffer */
986 u32 start_page;
987 /* The range in pages */
988 u32 num_pages;
989 /* Array of pointers to page */
990 struct page **page_array;
991 /* Array of lli */
992 struct sep_lli_entry *lli_array;
993 /* Map array */
994 struct sep_dma_map *map_array;
995 /* Direction of the DMA mapping for locked pages */
996 enum dma_data_direction dir;
998 /* Set start and end pages and num pages */
999 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1000 start_page = app_virt_addr >> PAGE_SHIFT;
1001 num_pages = end_page - start_page + 1;
1003 dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
1004 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1005 dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
1006 dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
1007 dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
1009 /* Allocate array of pages structure pointers */
1010 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1011 if (!page_array) {
1012 error = -ENOMEM;
1013 goto end_function;
1015 map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1016 if (!map_array) {
1017 dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
1018 error = -ENOMEM;
1019 goto end_function_with_error1;
1022 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1023 GFP_ATOMIC);
1025 if (!lli_array) {
1026 dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
1027 error = -ENOMEM;
1028 goto end_function_with_error2;
1031 /* Convert the application virtual address into a set of physical */
1032 down_read(&current->mm->mmap_sem);
1033 result = get_user_pages(current, current->mm, app_virt_addr,
1034 num_pages,
1035 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1036 0, page_array, NULL);
1038 up_read(&current->mm->mmap_sem);
1040 /* Check the number of pages locked - if not all then exit with error */
1041 if (result != num_pages) {
1042 dev_warn(&sep->pdev->dev,
1043 "not all pages locked by get_user_pages\n");
1044 error = -ENOMEM;
1045 goto end_function_with_error3;
1048 dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
1050 /* Set direction */
1051 if (in_out_flag == SEP_DRIVER_IN_FLAG)
1052 dir = DMA_TO_DEVICE;
1053 else
1054 dir = DMA_FROM_DEVICE;
1057 * Fill the array using page array data and
1058 * map the pages - this action will also flush the cache as needed
1060 for (count = 0; count < num_pages; count++) {
1061 /* Fill the map array */
1062 map_array[count].dma_addr =
1063 dma_map_page(&sep->pdev->dev, page_array[count],
1064 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
1066 map_array[count].size = PAGE_SIZE;
1068 /* Fill the lli array entry */
1069 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1070 lli_array[count].block_size = PAGE_SIZE;
1072 dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1073 count, (unsigned long)lli_array[count].bus_address,
1074 count, lli_array[count].block_size);
1077 /* Check the offset for the first page */
1078 lli_array[0].bus_address =
1079 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1081 /* Check that not all the data is in the first page only */
1082 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1083 lli_array[0].block_size = data_size;
1084 else
1085 lli_array[0].block_size =
1086 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1088 dev_dbg(&sep->pdev->dev,
1089 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
1090 (unsigned long)lli_array[count].bus_address,
1091 lli_array[count].block_size);
1093 /* Check the size of the last page */
1094 if (num_pages > 1) {
1095 lli_array[num_pages - 1].block_size =
1096 (app_virt_addr + data_size) & (~PAGE_MASK);
1097 if (lli_array[num_pages - 1].block_size == 0)
1098 lli_array[num_pages - 1].block_size = PAGE_SIZE;
1100 dev_warn(&sep->pdev->dev,
1101 "lli_array[%x].bus_address is "
1102 "%08lx, lli_array[%x].block_size is %x\n",
1103 num_pages - 1,
1104 (unsigned long)lli_array[num_pages - 1].bus_address,
1105 num_pages - 1,
1106 lli_array[num_pages - 1].block_size);
1109 /* Set output params according to the in_out flag */
1110 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1111 *lli_array_ptr = lli_array;
1112 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
1113 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
1114 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
1115 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
1116 num_pages;
1117 } else {
1118 *lli_array_ptr = lli_array;
1119 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
1120 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
1121 page_array;
1122 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
1123 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
1124 num_pages;
1126 goto end_function;
1128 end_function_with_error3:
1129 /* Free lli array */
1130 kfree(lli_array);
1132 end_function_with_error2:
1133 kfree(map_array);
1135 end_function_with_error1:
1136 /* Free page array */
1137 kfree(page_array);
1139 end_function:
1140 return error;
1144 * u32 sep_calculate_lli_table_max_size - size the LLI table
1145 * @sep: pointer to struct sep_device
1146 * @lli_in_array_ptr
1147 * @num_array_entries
1148 * @last_table_flag
1150 * This function calculates the size of data that can be inserted into
1151 * the lli table from this array, such that either the table is full
1152 * (all entries are entered), or there are no more entries in the
1153 * lli array
1155 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1156 struct sep_lli_entry *lli_in_array_ptr,
1157 u32 num_array_entries,
1158 u32 *last_table_flag)
1160 u32 counter;
1161 /* Table data size */
1162 u32 table_data_size = 0;
1163 /* Data size for the next table */
1164 u32 next_table_data_size;
1166 *last_table_flag = 0;
1169 * Calculate the data in the out lli table till we fill the whole
1170 * table or till the data has ended
1172 for (counter = 0;
1173 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1174 (counter < num_array_entries); counter++)
1175 table_data_size += lli_in_array_ptr[counter].block_size;
1178 * Check if we reached the last entry,
1179 * meaning this ia the last table to build,
1180 * and no need to check the block alignment
1182 if (counter == num_array_entries) {
1183 /* Set the last table flag */
1184 *last_table_flag = 1;
1185 goto end_function;
1189 * Calculate the data size of the next table.
1190 * Stop if no entries left or if data size is more the DMA restriction
1192 next_table_data_size = 0;
1193 for (; counter < num_array_entries; counter++) {
1194 next_table_data_size += lli_in_array_ptr[counter].block_size;
1195 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1196 break;
1200 * Check if the next table data size is less then DMA rstriction.
1201 * if it is - recalculate the current table size, so that the next
1202 * table data size will be adaquete for DMA
1204 if (next_table_data_size &&
1205 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1207 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1208 next_table_data_size);
1210 end_function:
1211 return table_data_size;
1215 * sep_build_lli_table - build an lli array for the given table
1216 * @sep: pointer to struct sep_device
1217 * @lli_array_ptr: pointer to lli array
1218 * @lli_table_ptr: pointer to lli table
1219 * @num_processed_entries_ptr: pointer to number of entries
1220 * @num_table_entries_ptr: pointer to number of tables
1221 * @table_data_size: total data size
1223 * Builds ant lli table from the lli_array according to
1224 * the given size of data
1226 static void sep_build_lli_table(struct sep_device *sep,
1227 struct sep_lli_entry *lli_array_ptr,
1228 struct sep_lli_entry *lli_table_ptr,
1229 u32 *num_processed_entries_ptr,
1230 u32 *num_table_entries_ptr,
1231 u32 table_data_size)
1233 /* Current table data size */
1234 u32 curr_table_data_size;
1235 /* Counter of lli array entry */
1236 u32 array_counter;
1238 /* Init current table data size and lli array entry counter */
1239 curr_table_data_size = 0;
1240 array_counter = 0;
1241 *num_table_entries_ptr = 1;
1243 dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
1245 /* Fill the table till table size reaches the needed amount */
1246 while (curr_table_data_size < table_data_size) {
1247 /* Update the number of entries in table */
1248 (*num_table_entries_ptr)++;
1250 lli_table_ptr->bus_address =
1251 cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1253 lli_table_ptr->block_size =
1254 cpu_to_le32(lli_array_ptr[array_counter].block_size);
1256 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1258 dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
1259 lli_table_ptr);
1260 dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
1261 (unsigned long)lli_table_ptr->bus_address);
1262 dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
1263 lli_table_ptr->block_size);
1265 /* Check for overflow of the table data */
1266 if (curr_table_data_size > table_data_size) {
1267 dev_dbg(&sep->pdev->dev,
1268 "curr_table_data_size too large\n");
1270 /* Update the size of block in the table */
1271 lli_table_ptr->block_size -=
1272 cpu_to_le32((curr_table_data_size - table_data_size));
1274 /* Update the physical address in the lli array */
1275 lli_array_ptr[array_counter].bus_address +=
1276 cpu_to_le32(lli_table_ptr->block_size);
1278 /* Update the block size left in the lli array */
1279 lli_array_ptr[array_counter].block_size =
1280 (curr_table_data_size - table_data_size);
1281 } else
1282 /* Advance to the next entry in the lli_array */
1283 array_counter++;
1285 dev_dbg(&sep->pdev->dev,
1286 "lli_table_ptr->bus_address is %08lx\n",
1287 (unsigned long)lli_table_ptr->bus_address);
1288 dev_dbg(&sep->pdev->dev,
1289 "lli_table_ptr->block_size is %x\n",
1290 lli_table_ptr->block_size);
1292 /* Move to the next entry in table */
1293 lli_table_ptr++;
1296 /* Set the info entry to default */
1297 lli_table_ptr->bus_address = 0xffffffff;
1298 lli_table_ptr->block_size = 0;
1300 /* Set the output parameter */
1301 *num_processed_entries_ptr += array_counter;
1306 * sep_shared_area_virt_to_bus - map shared area to bus address
1307 * @sep: pointer to struct sep_device
1308 * @virt_address: virtual address to convert
1310 * This functions returns the physical address inside shared area according
1311 * to the virtual address. It can be either on the externa RAM device
1312 * (ioremapped), or on the system RAM
1313 * This implementation is for the external RAM
1315 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1316 void *virt_address)
1318 dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
1319 dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
1320 (unsigned long)
1321 sep->shared_bus + (virt_address - sep->shared_addr));
1323 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1327 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1328 * @sep: pointer to struct sep_device
1329 * @bus_address: bus address to convert
1331 * This functions returns the virtual address inside shared area
1332 * according to the physical address. It can be either on the
1333 * externa RAM device (ioremapped), or on the system RAM
1334 * This implementation is for the external RAM
1336 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1337 dma_addr_t bus_address)
1339 dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
1340 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1341 (size_t)(bus_address - sep->shared_bus)));
1343 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1347 * sep_debug_print_lli_tables - dump LLI table
1348 * @sep: pointer to struct sep_device
1349 * @lli_table_ptr: pointer to sep_lli_entry
1350 * @num_table_entries: number of entries
1351 * @table_data_size: total data size
1353 * Walk the the list of the print created tables and print all the data
1355 static void sep_debug_print_lli_tables(struct sep_device *sep,
1356 struct sep_lli_entry *lli_table_ptr,
1357 unsigned long num_table_entries,
1358 unsigned long table_data_size)
1360 unsigned long table_count = 1;
1361 unsigned long entries_count = 0;
1363 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
1365 while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1366 dev_dbg(&sep->pdev->dev,
1367 "lli table %08lx, table_data_size is %lu\n",
1368 table_count, table_data_size);
1369 dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
1370 num_table_entries);
1372 /* Print entries of the table (without info entry) */
1373 for (entries_count = 0; entries_count < num_table_entries;
1374 entries_count++, lli_table_ptr++) {
1376 dev_dbg(&sep->pdev->dev,
1377 "lli_table_ptr address is %08lx\n",
1378 (unsigned long) lli_table_ptr);
1380 dev_dbg(&sep->pdev->dev,
1381 "phys address is %08lx block size is %x\n",
1382 (unsigned long)lli_table_ptr->bus_address,
1383 lli_table_ptr->block_size);
1385 /* Point to the info entry */
1386 lli_table_ptr--;
1388 dev_dbg(&sep->pdev->dev,
1389 "phys lli_table_ptr->block_size is %x\n",
1390 lli_table_ptr->block_size);
1392 dev_dbg(&sep->pdev->dev,
1393 "phys lli_table_ptr->physical_address is %08lu\n",
1394 (unsigned long)lli_table_ptr->bus_address);
1397 table_data_size = lli_table_ptr->block_size & 0xffffff;
1398 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1400 dev_dbg(&sep->pdev->dev,
1401 "phys table_data_size is %lu num_table_entries is"
1402 " %lu bus_address is%lu\n", table_data_size,
1403 num_table_entries, (unsigned long)lli_table_ptr->bus_address);
1405 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1406 lli_table_ptr = (struct sep_lli_entry *)
1407 sep_shared_bus_to_virt(sep,
1408 (unsigned long)lli_table_ptr->bus_address);
1410 table_count++;
1412 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
1417 * sep_prepare_empty_lli_table - create a blank LLI table
1418 * @sep: pointer to struct sep_device
1419 * @lli_table_addr_ptr: pointer to lli table
1420 * @num_entries_ptr: pointer to number of entries
1421 * @table_data_size_ptr: point to table data size
1423 * This function creates empty lli tables when there is no data
1425 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1426 dma_addr_t *lli_table_addr_ptr,
1427 u32 *num_entries_ptr,
1428 u32 *table_data_size_ptr)
1430 struct sep_lli_entry *lli_table_ptr;
1432 /* Find the area for new table */
1433 lli_table_ptr =
1434 (struct sep_lli_entry *)(sep->shared_addr +
1435 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1436 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1437 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1439 lli_table_ptr->bus_address = 0;
1440 lli_table_ptr->block_size = 0;
1442 lli_table_ptr++;
1443 lli_table_ptr->bus_address = 0xFFFFFFFF;
1444 lli_table_ptr->block_size = 0;
1446 /* Set the output parameter value */
1447 *lli_table_addr_ptr = sep->shared_bus +
1448 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1449 sep->num_lli_tables_created *
1450 sizeof(struct sep_lli_entry) *
1451 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1453 /* Set the num of entries and table data size for empty table */
1454 *num_entries_ptr = 2;
1455 *table_data_size_ptr = 0;
1457 /* Update the number of created tables */
1458 sep->num_lli_tables_created++;
1462 * sep_prepare_input_dma_table - prepare input DMA mappings
1463 * @sep: pointer to struct sep_device
1464 * @data_size:
1465 * @block_size:
1466 * @lli_table_ptr:
1467 * @num_entries_ptr:
1468 * @table_data_size_ptr:
1469 * @is_kva: set for kernel data (kernel cryptio call)
1471 * This function prepares only input DMA table for synhronic symmetric
1472 * operations (HASH)
1473 * Note that all bus addresses that are passed to the SEP
1474 * are in 32 bit format; the SEP is a 32 bit device
1476 static int sep_prepare_input_dma_table(struct sep_device *sep,
1477 unsigned long app_virt_addr,
1478 u32 data_size,
1479 u32 block_size,
1480 dma_addr_t *lli_table_ptr,
1481 u32 *num_entries_ptr,
1482 u32 *table_data_size_ptr,
1483 bool is_kva)
1485 int error = 0;
1486 /* Pointer to the info entry of the table - the last entry */
1487 struct sep_lli_entry *info_entry_ptr;
1488 /* Array of pointers to page */
1489 struct sep_lli_entry *lli_array_ptr;
1490 /* Points to the first entry to be processed in the lli_in_array */
1491 u32 current_entry = 0;
1492 /* Num entries in the virtual buffer */
1493 u32 sep_lli_entries = 0;
1494 /* Lli table pointer */
1495 struct sep_lli_entry *in_lli_table_ptr;
1496 /* The total data in one table */
1497 u32 table_data_size = 0;
1498 /* Flag for last table */
1499 u32 last_table_flag = 0;
1500 /* Number of entries in lli table */
1501 u32 num_entries_in_table = 0;
1502 /* Next table address */
1503 void *lli_table_alloc_addr = 0;
1505 dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size);
1506 dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
1508 /* Initialize the pages pointers */
1509 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1510 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
1512 /* Set the kernel address for first table to be allocated */
1513 lli_table_alloc_addr = (void *)(sep->shared_addr +
1514 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1515 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1516 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1518 if (data_size == 0) {
1519 /* Special case - create meptu table - 2 entries, zero data */
1520 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1521 num_entries_ptr, table_data_size_ptr);
1522 goto update_dcb_counter;
1525 /* Check if the pages are in Kernel Virtual Address layout */
1526 if (is_kva == true)
1527 /* Lock the pages in the kernel */
1528 error = sep_lock_kernel_pages(sep, app_virt_addr,
1529 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1530 else
1532 * Lock the pages of the user buffer
1533 * and translate them to pages
1535 error = sep_lock_user_pages(sep, app_virt_addr,
1536 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1538 if (error)
1539 goto end_function;
1541 dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
1542 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1544 current_entry = 0;
1545 info_entry_ptr = NULL;
1547 sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
1549 /* Loop till all the entries in in array are not processed */
1550 while (current_entry < sep_lli_entries) {
1552 /* Set the new input and output tables */
1553 in_lli_table_ptr =
1554 (struct sep_lli_entry *)lli_table_alloc_addr;
1556 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1557 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1559 if (lli_table_alloc_addr >
1560 ((void *)sep->shared_addr +
1561 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1562 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1564 error = -ENOMEM;
1565 goto end_function_error;
1569 /* Update the number of created tables */
1570 sep->num_lli_tables_created++;
1572 /* Calculate the maximum size of data for input table */
1573 table_data_size = sep_calculate_lli_table_max_size(sep,
1574 &lli_array_ptr[current_entry],
1575 (sep_lli_entries - current_entry),
1576 &last_table_flag);
1579 * If this is not the last table -
1580 * then align it to the block size
1582 if (!last_table_flag)
1583 table_data_size =
1584 (table_data_size / block_size) * block_size;
1586 dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
1587 table_data_size);
1589 /* Construct input lli table */
1590 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
1591 in_lli_table_ptr,
1592 &current_entry, &num_entries_in_table, table_data_size);
1594 if (info_entry_ptr == NULL) {
1596 /* Set the output parameters to physical addresses */
1597 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
1598 in_lli_table_ptr);
1599 *num_entries_ptr = num_entries_in_table;
1600 *table_data_size_ptr = table_data_size;
1602 dev_dbg(&sep->pdev->dev,
1603 "output lli_table_in_ptr is %08lx\n",
1604 (unsigned long)*lli_table_ptr);
1606 } else {
1607 /* Update the info entry of the previous in table */
1608 info_entry_ptr->bus_address =
1609 sep_shared_area_virt_to_bus(sep,
1610 in_lli_table_ptr);
1611 info_entry_ptr->block_size =
1612 ((num_entries_in_table) << 24) |
1613 (table_data_size);
1615 /* Save the pointer to the info entry of the current tables */
1616 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1618 /* Print input tables */
1619 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
1620 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
1621 *num_entries_ptr, *table_data_size_ptr);
1622 /* The array of the pages */
1623 kfree(lli_array_ptr);
1625 update_dcb_counter:
1626 /* Update DCB counter */
1627 sep->nr_dcb_creat++;
1628 goto end_function;
1630 end_function_error:
1631 /* Free all the allocated resources */
1632 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
1633 kfree(lli_array_ptr);
1634 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
1636 end_function:
1637 return error;
1641 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1642 * @sep: pointer to struct sep_device
1643 * @lli_in_array:
1644 * @sep_in_lli_entries:
1645 * @lli_out_array:
1646 * @sep_out_lli_entries
1647 * @block_size
1648 * @lli_table_in_ptr
1649 * @lli_table_out_ptr
1650 * @in_num_entries_ptr
1651 * @out_num_entries_ptr
1652 * @table_data_size_ptr
1654 * This function creates the input and output DMA tables for
1655 * symmetric operations (AES/DES) according to the block
1656 * size from LLI arays
1657 * Note that all bus addresses that are passed to the SEP
1658 * are in 32 bit format; the SEP is a 32 bit device
1660 static int sep_construct_dma_tables_from_lli(
1661 struct sep_device *sep,
1662 struct sep_lli_entry *lli_in_array,
1663 u32 sep_in_lli_entries,
1664 struct sep_lli_entry *lli_out_array,
1665 u32 sep_out_lli_entries,
1666 u32 block_size,
1667 dma_addr_t *lli_table_in_ptr,
1668 dma_addr_t *lli_table_out_ptr,
1669 u32 *in_num_entries_ptr,
1670 u32 *out_num_entries_ptr,
1671 u32 *table_data_size_ptr)
1673 /* Points to the area where next lli table can be allocated */
1674 void *lli_table_alloc_addr = 0;
1675 /* Input lli table */
1676 struct sep_lli_entry *in_lli_table_ptr = NULL;
1677 /* Output lli table */
1678 struct sep_lli_entry *out_lli_table_ptr = NULL;
1679 /* Pointer to the info entry of the table - the last entry */
1680 struct sep_lli_entry *info_in_entry_ptr = NULL;
1681 /* Pointer to the info entry of the table - the last entry */
1682 struct sep_lli_entry *info_out_entry_ptr = NULL;
1683 /* Points to the first entry to be processed in the lli_in_array */
1684 u32 current_in_entry = 0;
1685 /* Points to the first entry to be processed in the lli_out_array */
1686 u32 current_out_entry = 0;
1687 /* Max size of the input table */
1688 u32 in_table_data_size = 0;
1689 /* Max size of the output table */
1690 u32 out_table_data_size = 0;
1691 /* Flag te signifies if this is the last tables build */
1692 u32 last_table_flag = 0;
1693 /* The data size that should be in table */
1694 u32 table_data_size = 0;
1695 /* Number of etnries in the input table */
1696 u32 num_entries_in_table = 0;
1697 /* Number of etnries in the output table */
1698 u32 num_entries_out_table = 0;
1700 /* Initiate to point after the message area */
1701 lli_table_alloc_addr = (void *)(sep->shared_addr +
1702 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1703 (sep->num_lli_tables_created *
1704 (sizeof(struct sep_lli_entry) *
1705 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1707 /* Loop till all the entries in in array are not processed */
1708 while (current_in_entry < sep_in_lli_entries) {
1709 /* Set the new input and output tables */
1710 in_lli_table_ptr =
1711 (struct sep_lli_entry *)lli_table_alloc_addr;
1713 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1714 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1716 /* Set the first output tables */
1717 out_lli_table_ptr =
1718 (struct sep_lli_entry *)lli_table_alloc_addr;
1720 /* Check if the DMA table area limit was overrun */
1721 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1722 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1723 ((void *)sep->shared_addr +
1724 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1725 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1727 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1728 return -ENOMEM;
1731 /* Update the number of the lli tables created */
1732 sep->num_lli_tables_created += 2;
1734 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1735 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1737 /* Calculate the maximum size of data for input table */
1738 in_table_data_size =
1739 sep_calculate_lli_table_max_size(sep,
1740 &lli_in_array[current_in_entry],
1741 (sep_in_lli_entries - current_in_entry),
1742 &last_table_flag);
1744 /* Calculate the maximum size of data for output table */
1745 out_table_data_size =
1746 sep_calculate_lli_table_max_size(sep,
1747 &lli_out_array[current_out_entry],
1748 (sep_out_lli_entries - current_out_entry),
1749 &last_table_flag);
1751 dev_dbg(&sep->pdev->dev,
1752 "construct tables from lli in_table_data_size is %x\n",
1753 in_table_data_size);
1755 dev_dbg(&sep->pdev->dev,
1756 "construct tables from lli out_table_data_size is %x\n",
1757 out_table_data_size);
1759 table_data_size = in_table_data_size;
1761 if (!last_table_flag) {
1763 * If this is not the last table,
1764 * then must check where the data is smallest
1765 * and then align it to the block size
1767 if (table_data_size > out_table_data_size)
1768 table_data_size = out_table_data_size;
1771 * Now calculate the table size so that
1772 * it will be module block size
1774 table_data_size = (table_data_size / block_size) *
1775 block_size;
1778 /* Construct input lli table */
1779 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1780 in_lli_table_ptr,
1781 &current_in_entry,
1782 &num_entries_in_table,
1783 table_data_size);
1785 /* Construct output lli table */
1786 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
1787 out_lli_table_ptr,
1788 &current_out_entry,
1789 &num_entries_out_table,
1790 table_data_size);
1792 /* If info entry is null - this is the first table built */
1793 if (info_in_entry_ptr == NULL) {
1794 /* Set the output parameters to physical addresses */
1795 *lli_table_in_ptr =
1796 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1798 *in_num_entries_ptr = num_entries_in_table;
1800 *lli_table_out_ptr =
1801 sep_shared_area_virt_to_bus(sep,
1802 out_lli_table_ptr);
1804 *out_num_entries_ptr = num_entries_out_table;
1805 *table_data_size_ptr = table_data_size;
1807 dev_dbg(&sep->pdev->dev,
1808 "output lli_table_in_ptr is %08lx\n",
1809 (unsigned long)*lli_table_in_ptr);
1810 dev_dbg(&sep->pdev->dev,
1811 "output lli_table_out_ptr is %08lx\n",
1812 (unsigned long)*lli_table_out_ptr);
1813 } else {
1814 /* Update the info entry of the previous in table */
1815 info_in_entry_ptr->bus_address =
1816 sep_shared_area_virt_to_bus(sep,
1817 in_lli_table_ptr);
1819 info_in_entry_ptr->block_size =
1820 ((num_entries_in_table) << 24) |
1821 (table_data_size);
1823 /* Update the info entry of the previous in table */
1824 info_out_entry_ptr->bus_address =
1825 sep_shared_area_virt_to_bus(sep,
1826 out_lli_table_ptr);
1828 info_out_entry_ptr->block_size =
1829 ((num_entries_out_table) << 24) |
1830 (table_data_size);
1832 dev_dbg(&sep->pdev->dev,
1833 "output lli_table_in_ptr:%08lx %08x\n",
1834 (unsigned long)info_in_entry_ptr->bus_address,
1835 info_in_entry_ptr->block_size);
1837 dev_dbg(&sep->pdev->dev,
1838 "output lli_table_out_ptr:%08lx %08x\n",
1839 (unsigned long)info_out_entry_ptr->bus_address,
1840 info_out_entry_ptr->block_size);
1843 /* Save the pointer to the info entry of the current tables */
1844 info_in_entry_ptr = in_lli_table_ptr +
1845 num_entries_in_table - 1;
1846 info_out_entry_ptr = out_lli_table_ptr +
1847 num_entries_out_table - 1;
1849 dev_dbg(&sep->pdev->dev,
1850 "output num_entries_out_table is %x\n",
1851 (u32)num_entries_out_table);
1852 dev_dbg(&sep->pdev->dev,
1853 "output info_in_entry_ptr is %lx\n",
1854 (unsigned long)info_in_entry_ptr);
1855 dev_dbg(&sep->pdev->dev,
1856 "output info_out_entry_ptr is %lx\n",
1857 (unsigned long)info_out_entry_ptr);
1860 /* Print input tables */
1861 sep_debug_print_lli_tables(sep,
1862 (struct sep_lli_entry *)
1863 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
1864 *in_num_entries_ptr,
1865 *table_data_size_ptr);
1867 /* Print output tables */
1868 sep_debug_print_lli_tables(sep,
1869 (struct sep_lli_entry *)
1870 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
1871 *out_num_entries_ptr,
1872 *table_data_size_ptr);
1874 return 0;
1878 * sep_prepare_input_output_dma_table - prepare DMA I/O table
1879 * @app_virt_in_addr:
1880 * @app_virt_out_addr:
1881 * @data_size:
1882 * @block_size:
1883 * @lli_table_in_ptr:
1884 * @lli_table_out_ptr:
1885 * @in_num_entries_ptr:
1886 * @out_num_entries_ptr:
1887 * @table_data_size_ptr:
1888 * @is_kva: set for kernel data; used only for kernel crypto module
1890 * This function builds input and output DMA tables for synhronic
1891 * symmetric operations (AES, DES, HASH). It also checks that each table
1892 * is of the modular block size
1893 * Note that all bus addresses that are passed to the SEP
1894 * are in 32 bit format; the SEP is a 32 bit device
1896 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1897 unsigned long app_virt_in_addr,
1898 unsigned long app_virt_out_addr,
1899 u32 data_size,
1900 u32 block_size,
1901 dma_addr_t *lli_table_in_ptr,
1902 dma_addr_t *lli_table_out_ptr,
1903 u32 *in_num_entries_ptr,
1904 u32 *out_num_entries_ptr,
1905 u32 *table_data_size_ptr,
1906 bool is_kva)
1909 int error = 0;
1910 /* Array of pointers of page */
1911 struct sep_lli_entry *lli_in_array;
1912 /* Array of pointers of page */
1913 struct sep_lli_entry *lli_out_array;
1915 if (data_size == 0) {
1916 /* Prepare empty table for input and output */
1917 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
1918 in_num_entries_ptr, table_data_size_ptr);
1920 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
1921 out_num_entries_ptr, table_data_size_ptr);
1923 goto update_dcb_counter;
1926 /* Initialize the pages pointers */
1927 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1928 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
1930 /* Lock the pages of the buffer and translate them to pages */
1931 if (is_kva == true) {
1932 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
1933 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1935 if (error) {
1936 dev_warn(&sep->pdev->dev,
1937 "lock kernel for in failed\n");
1938 goto end_function;
1941 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
1942 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1944 if (error) {
1945 dev_warn(&sep->pdev->dev,
1946 "lock kernel for out failed\n");
1947 goto end_function;
1951 else {
1952 error = sep_lock_user_pages(sep, app_virt_in_addr,
1953 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1954 if (error) {
1955 dev_warn(&sep->pdev->dev,
1956 "sep_lock_user_pages for input virtual buffer failed\n");
1957 goto end_function;
1960 error = sep_lock_user_pages(sep, app_virt_out_addr,
1961 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1963 if (error) {
1964 dev_warn(&sep->pdev->dev,
1965 "sep_lock_user_pages for output virtual buffer failed\n");
1966 goto end_function_free_lli_in;
1970 dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
1971 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1972 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
1973 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
1974 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
1975 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1977 /* Call the function that creates table from the lli arrays */
1978 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
1979 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
1980 lli_out_array,
1981 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
1982 block_size, lli_table_in_ptr, lli_table_out_ptr,
1983 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1985 if (error) {
1986 dev_warn(&sep->pdev->dev,
1987 "sep_construct_dma_tables_from_lli failed\n");
1988 goto end_function_with_error;
1991 kfree(lli_out_array);
1992 kfree(lli_in_array);
1994 update_dcb_counter:
1995 /* Update DCB counter */
1996 sep->nr_dcb_creat++;
1998 goto end_function;
2000 end_function_with_error:
2001 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2002 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2003 kfree(lli_out_array);
2006 end_function_free_lli_in:
2007 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2008 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2009 kfree(lli_in_array);
2011 end_function:
2013 return error;
2018 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2019 * @app_in_address: unsigned long; for data buffer in (user space)
2020 * @app_out_address: unsigned long; for data buffer out (user space)
2021 * @data_in_size: u32; for size of data
2022 * @block_size: u32; for block size
2023 * @tail_block_size: u32; for size of tail block
2024 * @isapplet: bool; to indicate external app
2025 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2027 * This function prepares the linked DMA tables and puts the
2028 * address for the linked list of tables inta a DCB (data control
2029 * block) the address of which is known by the SEP hardware
2030 * Note that all bus addresses that are passed to the SEP
2031 * are in 32 bit format; the SEP is a 32 bit device
2033 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2034 unsigned long app_in_address,
2035 unsigned long app_out_address,
2036 u32 data_in_size,
2037 u32 block_size,
2038 u32 tail_block_size,
2039 bool isapplet,
2040 bool is_kva)
2042 int error = 0;
2043 /* Size of tail */
2044 u32 tail_size = 0;
2045 /* Address of the created DCB table */
2046 struct sep_dcblock *dcb_table_ptr = NULL;
2047 /* The physical address of the first input DMA table */
2048 dma_addr_t in_first_mlli_address = 0;
2049 /* Number of entries in the first input DMA table */
2050 u32 in_first_num_entries = 0;
2051 /* The physical address of the first output DMA table */
2052 dma_addr_t out_first_mlli_address = 0;
2053 /* Number of entries in the first output DMA table */
2054 u32 out_first_num_entries = 0;
2055 /* Data in the first input/output table */
2056 u32 first_data_size = 0;
2058 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2059 /* No more DCBs to allocate */
2060 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2061 error = -ENOSPC;
2062 goto end_function;
2065 /* Allocate new DCB */
2066 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2067 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2068 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2070 /* Set the default values in the DCB */
2071 dcb_table_ptr->input_mlli_address = 0;
2072 dcb_table_ptr->input_mlli_num_entries = 0;
2073 dcb_table_ptr->input_mlli_data_size = 0;
2074 dcb_table_ptr->output_mlli_address = 0;
2075 dcb_table_ptr->output_mlli_num_entries = 0;
2076 dcb_table_ptr->output_mlli_data_size = 0;
2077 dcb_table_ptr->tail_data_size = 0;
2078 dcb_table_ptr->out_vr_tail_pt = 0;
2080 if (isapplet == true) {
2082 /* Check if there is enough data for DMA operation */
2083 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2084 if (is_kva == true) {
2085 memcpy(dcb_table_ptr->tail_data,
2086 (void *)app_in_address, data_in_size);
2087 } else {
2088 if (copy_from_user(dcb_table_ptr->tail_data,
2089 (void __user *)app_in_address,
2090 data_in_size)) {
2091 error = -EFAULT;
2092 goto end_function;
2096 dcb_table_ptr->tail_data_size = data_in_size;
2098 /* Set the output user-space address for mem2mem op */
2099 if (app_out_address)
2100 dcb_table_ptr->out_vr_tail_pt =
2101 (aligned_u64)app_out_address;
2104 * Update both data length parameters in order to avoid
2105 * second data copy and allow building of empty mlli
2106 * tables
2108 tail_size = 0x0;
2109 data_in_size = 0x0;
2111 } else {
2112 if (!app_out_address) {
2113 tail_size = data_in_size % block_size;
2114 if (!tail_size) {
2115 if (tail_block_size == block_size)
2116 tail_size = block_size;
2118 } else {
2119 tail_size = 0;
2122 if (tail_size) {
2123 if (tail_size > sizeof(dcb_table_ptr->tail_data))
2124 return -EINVAL;
2125 if (is_kva == true) {
2126 memcpy(dcb_table_ptr->tail_data,
2127 (void *)(app_in_address + data_in_size -
2128 tail_size), tail_size);
2129 } else {
2130 /* We have tail data - copy it to DCB */
2131 if (copy_from_user(dcb_table_ptr->tail_data,
2132 (void *)(app_in_address +
2133 data_in_size - tail_size), tail_size)) {
2134 error = -EFAULT;
2135 goto end_function;
2138 if (app_out_address)
2140 * Calculate the output address
2141 * according to tail data size
2143 dcb_table_ptr->out_vr_tail_pt =
2144 (aligned_u64)app_out_address + data_in_size
2145 - tail_size;
2147 /* Save the real tail data size */
2148 dcb_table_ptr->tail_data_size = tail_size;
2150 * Update the data size without the tail
2151 * data size AKA data for the dma
2153 data_in_size = (data_in_size - tail_size);
2156 /* Check if we need to build only input table or input/output */
2157 if (app_out_address) {
2158 /* Prepare input/output tables */
2159 error = sep_prepare_input_output_dma_table(sep,
2160 app_in_address,
2161 app_out_address,
2162 data_in_size,
2163 block_size,
2164 &in_first_mlli_address,
2165 &out_first_mlli_address,
2166 &in_first_num_entries,
2167 &out_first_num_entries,
2168 &first_data_size,
2169 is_kva);
2170 } else {
2171 /* Prepare input tables */
2172 error = sep_prepare_input_dma_table(sep,
2173 app_in_address,
2174 data_in_size,
2175 block_size,
2176 &in_first_mlli_address,
2177 &in_first_num_entries,
2178 &first_data_size,
2179 is_kva);
2182 if (error) {
2183 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2184 goto end_function;
2187 /* Set the DCB values */
2188 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2189 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2190 dcb_table_ptr->input_mlli_data_size = first_data_size;
2191 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2192 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2193 dcb_table_ptr->output_mlli_data_size = first_data_size;
2195 end_function:
2196 return error;
2201 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2202 * @sep: pointer to struct sep_device
2203 * @isapplet: indicates external application (used for kernel access)
2204 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2206 * This function frees the DMA tables and DCB
2208 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2209 bool is_kva)
2211 int i = 0;
2212 int error = 0;
2213 int error_temp = 0;
2214 struct sep_dcblock *dcb_table_ptr;
2215 unsigned long pt_hold;
2216 void *tail_pt;
2218 if (isapplet == true) {
2219 /* Set pointer to first DCB table */
2220 dcb_table_ptr = (struct sep_dcblock *)
2221 (sep->shared_addr +
2222 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2224 /* Go over each DCB and see if tail pointer must be updated */
2225 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2226 if (dcb_table_ptr->out_vr_tail_pt) {
2227 pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
2228 tail_pt = (void *)pt_hold;
2229 if (is_kva == true) {
2230 memcpy(tail_pt,
2231 dcb_table_ptr->tail_data,
2232 dcb_table_ptr->tail_data_size);
2233 } else {
2234 error_temp = copy_to_user(
2235 tail_pt,
2236 dcb_table_ptr->tail_data,
2237 dcb_table_ptr->tail_data_size);
2239 if (error_temp) {
2240 /* Release the DMA resource */
2241 error = -EFAULT;
2242 break;
2247 /* Free the output pages, if any */
2248 sep_free_dma_table_data_handler(sep);
2250 return error;
2254 * sep_get_static_pool_addr_handler - get static pool address
2255 * @sep: pointer to struct sep_device
2257 * This function sets the bus and virtual addresses of the static pool
2259 static int sep_get_static_pool_addr_handler(struct sep_device *sep)
2261 u32 *static_pool_addr = NULL;
2263 static_pool_addr = (u32 *)(sep->shared_addr +
2264 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2266 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2267 static_pool_addr[1] = (u32)sep->shared_bus +
2268 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2270 dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
2271 (u32)static_pool_addr[1]);
2273 return 0;
2277 * sep_end_transaction_handler - end transaction
2278 * @sep: pointer to struct sep_device
2280 * This API handles the end transaction request
2282 static int sep_end_transaction_handler(struct sep_device *sep)
2284 /* Clear the data pool pointers Token */
2285 memset((void *)(sep->shared_addr +
2286 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2287 0, sep->num_of_data_allocations*2*sizeof(u32));
2289 /* Check that all the DMA resources were freed */
2290 sep_free_dma_table_data_handler(sep);
2292 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2295 * We are now through with the transaction. Let's
2296 * allow other processes who have the device open
2297 * to perform transactions
2299 mutex_lock(&sep->sep_mutex);
2300 sep->pid_doing_transaction = 0;
2301 mutex_unlock(&sep->sep_mutex);
2302 /* Raise event for stuck contextes */
2303 wake_up(&sep->event);
2305 return 0;
2309 * sep_prepare_dcb_handler - prepare a control block
2310 * @sep: pointer to struct sep_device
2311 * @arg: pointer to user parameters
2313 * This function will retrieve the RAR buffer physical addresses, type
2314 * & size corresponding to the RAR handles provided in the buffers vector.
2316 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2318 int error;
2319 /* Command arguments */
2320 struct build_dcb_struct command_args;
2322 /* Get the command arguments */
2323 if (copy_from_user(&command_args, (void __user *)arg,
2324 sizeof(struct build_dcb_struct))) {
2325 error = -EFAULT;
2326 goto end_function;
2329 dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
2330 command_args.app_in_address);
2331 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2332 command_args.app_out_address);
2333 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2334 command_args.data_in_size);
2335 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2336 command_args.block_size);
2337 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2338 command_args.tail_block_size);
2340 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2341 (unsigned long)command_args.app_in_address,
2342 (unsigned long)command_args.app_out_address,
2343 command_args.data_in_size, command_args.block_size,
2344 command_args.tail_block_size, true, false);
2346 end_function:
2347 return error;
2352 * sep_free_dcb_handler - free control block resources
2353 * @sep: pointer to struct sep_device
2355 * This function frees the DCB resources and updates the needed
2356 * user-space buffers.
2358 static int sep_free_dcb_handler(struct sep_device *sep)
2360 return sep_free_dma_tables_and_dcb(sep, false, false);
2364 * sep_rar_prepare_output_msg_handler - prepare an output message
2365 * @sep: pointer to struct sep_device
2366 * @arg: pointer to user parameters
2368 * This function will retrieve the RAR buffer physical addresses, type
2369 * & size corresponding to the RAR handles provided in the buffers vector.
2371 static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2372 unsigned long arg)
2374 int error = 0;
2375 /* Command args */
2376 struct rar_hndl_to_bus_struct command_args;
2377 /* Bus address */
2378 dma_addr_t rar_bus = 0;
2379 /* Holds the RAR address in the system memory offset */
2380 u32 *rar_addr;
2382 /* Copy the data */
2383 if (copy_from_user(&command_args, (void __user *)arg,
2384 sizeof(command_args))) {
2385 error = -EFAULT;
2386 goto end_function;
2389 /* Call to translation function only if user handle is not NULL */
2390 if (command_args.rar_handle)
2391 return -EOPNOTSUPP;
2392 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2394 /* Set value in the SYSTEM MEMORY offset */
2395 rar_addr = (u32 *)(sep->shared_addr +
2396 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2398 /* Copy the physical address to the System Area for the SEP */
2399 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2400 rar_addr[1] = rar_bus;
2402 end_function:
2403 return error;
2407 * sep_ioctl - ioctl api
2408 * @filp: pointer to struct file
2409 * @cmd: command
2410 * @arg: pointer to argument structure
2412 * Implement the ioctl methods available on the SEP device.
2414 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2416 int error = 0;
2417 struct sep_device *sep = filp->private_data;
2419 /* Make sure we own this device */
2420 mutex_lock(&sep->sep_mutex);
2421 if ((current->pid != sep->pid_doing_transaction) &&
2422 (sep->pid_doing_transaction != 0)) {
2423 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
2424 error = -EACCES;
2426 mutex_unlock(&sep->sep_mutex);
2428 if (error)
2429 return error;
2431 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2432 return -ENOTTY;
2434 /* Lock to prevent the daemon to interfere with operation */
2435 mutex_lock(&sep->ioctl_mutex);
2437 switch (cmd) {
2438 case SEP_IOCSENDSEPCOMMAND:
2439 /* Send command to SEP */
2440 error = sep_send_command_handler(sep);
2441 break;
2442 case SEP_IOCALLOCDATAPOLL:
2443 /* Allocate data pool */
2444 error = sep_allocate_data_pool_memory_handler(sep, arg);
2445 break;
2446 case SEP_IOCGETSTATICPOOLADDR:
2447 /* Inform the SEP the bus address of the static pool */
2448 error = sep_get_static_pool_addr_handler(sep);
2449 break;
2450 case SEP_IOCENDTRANSACTION:
2451 error = sep_end_transaction_handler(sep);
2452 break;
2453 case SEP_IOCRARPREPAREMESSAGE:
2454 error = sep_rar_prepare_output_msg_handler(sep, arg);
2455 break;
2456 case SEP_IOCPREPAREDCB:
2457 error = sep_prepare_dcb_handler(sep, arg);
2458 break;
2459 case SEP_IOCFREEDCB:
2460 error = sep_free_dcb_handler(sep);
2461 break;
2462 default:
2463 error = -ENOTTY;
2464 break;
2467 mutex_unlock(&sep->ioctl_mutex);
2468 return error;
2472 * sep_singleton_ioctl - ioctl api for singleton interface
2473 * @filp: pointer to struct file
2474 * @cmd: command
2475 * @arg: pointer to argument structure
2477 * Implement the additional ioctls for the singleton device
2479 static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
2481 long error = 0;
2482 struct sep_device *sep = filp->private_data;
2484 /* Check that the command is for the SEP device */
2485 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2486 return -ENOTTY;
2488 /* Make sure we own this device */
2489 mutex_lock(&sep->sep_mutex);
2490 if ((current->pid != sep->pid_doing_transaction) &&
2491 (sep->pid_doing_transaction != 0)) {
2492 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
2493 mutex_unlock(&sep->sep_mutex);
2494 return -EACCES;
2497 mutex_unlock(&sep->sep_mutex);
2499 switch (cmd) {
2500 case SEP_IOCTLSETCALLERID:
2501 mutex_lock(&sep->ioctl_mutex);
2502 error = sep_set_caller_id_handler(sep, arg);
2503 mutex_unlock(&sep->ioctl_mutex);
2504 break;
2505 default:
2506 error = sep_ioctl(filp, cmd, arg);
2507 break;
2509 return error;
2513 * sep_request_daemon_ioctl - ioctl for daemon
2514 * @filp: pointer to struct file
2515 * @cmd: command
2516 * @arg: pointer to argument structure
2518 * Called by the request daemon to perform ioctls on the daemon device
2520 static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
2521 unsigned long arg)
2524 long error;
2525 struct sep_device *sep = filp->private_data;
2527 /* Check that the command is for SEP device */
2528 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2529 return -ENOTTY;
2531 /* Only one process can access ioctl at any given time */
2532 mutex_lock(&sep->ioctl_mutex);
2534 switch (cmd) {
2535 case SEP_IOCSENDSEPRPLYCOMMAND:
2536 /* Send reply command to SEP */
2537 error = sep_req_daemon_send_reply_command_handler(sep);
2538 break;
2539 case SEP_IOCENDTRANSACTION:
2541 * End req daemon transaction, do nothing
2542 * will be removed upon update in middleware
2543 * API library
2545 error = 0;
2546 break;
2547 default:
2548 error = -ENOTTY;
2550 mutex_unlock(&sep->ioctl_mutex);
2551 return error;
2555 * sep_inthandler - interrupt handler
2556 * @irq: interrupt
2557 * @dev_id: device id
2559 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2561 irqreturn_t int_error = IRQ_HANDLED;
2562 unsigned long lck_flags;
2563 u32 reg_val, reg_val2 = 0;
2564 struct sep_device *sep = dev_id;
2566 /* Read the IRR register to check if this is SEP interrupt */
2567 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2569 if (reg_val & (0x1 << 13)) {
2570 /* Lock and update the counter of reply messages */
2571 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
2572 sep->reply_ct++;
2573 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
2575 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
2576 sep->send_ct, sep->reply_ct);
2578 /* Is this printf or daemon request? */
2579 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2580 dev_dbg(&sep->pdev->dev,
2581 "SEP Interrupt - reg2 is %08x\n", reg_val2);
2583 if ((reg_val2 >> 30) & 0x1) {
2584 dev_dbg(&sep->pdev->dev, "int: printf request\n");
2585 wake_up(&sep->event_request_daemon);
2586 } else if (reg_val2 >> 31) {
2587 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
2588 wake_up(&sep->event_request_daemon);
2589 } else {
2590 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
2591 wake_up(&sep->event);
2593 } else {
2594 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
2595 int_error = IRQ_NONE;
2597 if (int_error == IRQ_HANDLED)
2598 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2600 return int_error;
2604 * sep_reconfig_shared_area - reconfigure shared area
2605 * @sep: pointer to struct sep_device
2607 * Reconfig the shared area between HOST and SEP - needed in case
2608 * the DX_CC_Init function was called before OS loading.
2610 static int sep_reconfig_shared_area(struct sep_device *sep)
2612 int ret_val;
2614 /* use to limit waiting for SEP */
2615 unsigned long end_time;
2617 /* Send the new SHARED MESSAGE AREA to the SEP */
2618 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
2619 (unsigned long long)sep->shared_bus);
2621 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2623 /* Poll for SEP response */
2624 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2626 end_time = jiffies + (WAIT_TIME * HZ);
2628 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
2629 (ret_val != sep->shared_bus))
2630 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2632 /* Check the return value (register) */
2633 if (ret_val != sep->shared_bus) {
2634 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
2635 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
2636 ret_val = -ENOMEM;
2637 } else
2638 ret_val = 0;
2640 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
2641 return ret_val;
2644 /* File operation for singleton SEP operations */
2645 static const struct file_operations singleton_file_operations = {
2646 .owner = THIS_MODULE,
2647 .unlocked_ioctl = sep_singleton_ioctl,
2648 .poll = sep_poll,
2649 .open = sep_singleton_open,
2650 .release = sep_singleton_release,
2651 .mmap = sep_mmap,
2654 /* File operation for daemon operations */
2655 static const struct file_operations daemon_file_operations = {
2656 .owner = THIS_MODULE,
2657 .unlocked_ioctl = sep_request_daemon_ioctl,
2658 .poll = sep_request_daemon_poll,
2659 .open = sep_request_daemon_open,
2660 .release = sep_request_daemon_release,
2661 .mmap = sep_request_daemon_mmap,
2664 /* The files operations structure of the driver */
2665 static const struct file_operations sep_file_operations = {
2666 .owner = THIS_MODULE,
2667 .unlocked_ioctl = sep_ioctl,
2668 .poll = sep_poll,
2669 .open = sep_open,
2670 .release = sep_release,
2671 .mmap = sep_mmap,
2675 * sep_register_driver_with_fs - register misc devices
2676 * @sep: pointer to struct sep_device
2678 * This function registers the driver with the file system
2680 static int sep_register_driver_with_fs(struct sep_device *sep)
2682 int ret_val;
2684 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
2685 sep->miscdev_sep.name = SEP_DEV_NAME;
2686 sep->miscdev_sep.fops = &sep_file_operations;
2688 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
2689 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
2690 sep->miscdev_singleton.fops = &singleton_file_operations;
2692 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
2693 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
2694 sep->miscdev_daemon.fops = &daemon_file_operations;
2696 ret_val = misc_register(&sep->miscdev_sep);
2697 if (ret_val) {
2698 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
2699 ret_val);
2700 return ret_val;
2703 ret_val = misc_register(&sep->miscdev_singleton);
2704 if (ret_val) {
2705 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
2706 ret_val);
2707 misc_deregister(&sep->miscdev_sep);
2708 return ret_val;
2711 ret_val = misc_register(&sep->miscdev_daemon);
2712 if (ret_val) {
2713 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
2714 ret_val);
2715 misc_deregister(&sep->miscdev_sep);
2716 misc_deregister(&sep->miscdev_singleton);
2718 return ret_val;
2720 return ret_val;
2725 * sep_probe - probe a matching PCI device
2726 * @pdev: pci_device
2727 * @end: pci_device_id
2729 * Attempt to set up and configure a SEP device that has been
2730 * discovered by the PCI layer.
2732 static int __devinit sep_probe(struct pci_dev *pdev,
2733 const struct pci_device_id *ent)
2735 int error = 0;
2736 struct sep_device *sep;
2738 if (sep_dev != NULL) {
2739 dev_warn(&pdev->dev, "only one SEP supported.\n");
2740 return -EBUSY;
2743 /* Enable the device */
2744 error = pci_enable_device(pdev);
2745 if (error) {
2746 dev_warn(&pdev->dev, "error enabling pci device\n");
2747 goto end_function;
2750 /* Allocate the sep_device structure for this device */
2751 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
2752 if (sep_dev == NULL) {
2753 dev_warn(&pdev->dev,
2754 "can't kmalloc the sep_device structure\n");
2755 error = -ENOMEM;
2756 goto end_function_disable_device;
2760 * We're going to use another variable for actually
2761 * working with the device; this way, if we have
2762 * multiple devices in the future, it would be easier
2763 * to make appropriate changes
2765 sep = sep_dev;
2767 sep->pdev = pci_dev_get(pdev);
2769 init_waitqueue_head(&sep->event);
2770 init_waitqueue_head(&sep->event_request_daemon);
2771 spin_lock_init(&sep->snd_rply_lck);
2772 mutex_init(&sep->sep_mutex);
2773 mutex_init(&sep->ioctl_mutex);
2775 dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
2776 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
2778 /* Set up our register area */
2779 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
2780 if (!sep->reg_physical_addr) {
2781 dev_warn(&sep->pdev->dev, "Error getting register start\n");
2782 error = -ENODEV;
2783 goto end_function_free_sep_dev;
2786 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
2787 if (!sep->reg_physical_end) {
2788 dev_warn(&sep->pdev->dev, "Error getting register end\n");
2789 error = -ENODEV;
2790 goto end_function_free_sep_dev;
2793 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
2794 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
2795 if (!sep->reg_addr) {
2796 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
2797 error = -ENODEV;
2798 goto end_function_free_sep_dev;
2801 dev_dbg(&sep->pdev->dev,
2802 "Register area start %llx end %llx virtual %p\n",
2803 (unsigned long long)sep->reg_physical_addr,
2804 (unsigned long long)sep->reg_physical_end,
2805 sep->reg_addr);
2807 /* Allocate the shared area */
2808 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2809 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
2810 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
2811 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
2812 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2814 if (sep_map_and_alloc_shared_area(sep)) {
2815 error = -ENOMEM;
2816 /* Allocation failed */
2817 goto end_function_error;
2820 /* Clear ICR register */
2821 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2823 /* Set the IMR register - open only GPR 2 */
2824 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2826 /* Read send/receive counters from SEP */
2827 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2828 sep->reply_ct &= 0x3FFFFFFF;
2829 sep->send_ct = sep->reply_ct;
2831 /* Get the interrupt line */
2832 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
2833 "sep_driver", sep);
2835 if (error)
2836 goto end_function_deallocate_sep_shared_area;
2838 /* The new chip requires a shared area reconfigure */
2839 if (sep->pdev->revision == 4) { /* Only for new chip */
2840 error = sep_reconfig_shared_area(sep);
2841 if (error)
2842 goto end_function_free_irq;
2844 /* Finally magic up the device nodes */
2845 /* Register driver with the fs */
2846 error = sep_register_driver_with_fs(sep);
2847 if (error == 0)
2848 /* Success */
2849 return 0;
2851 end_function_free_irq:
2852 free_irq(pdev->irq, sep);
2854 end_function_deallocate_sep_shared_area:
2855 /* De-allocate shared area */
2856 sep_unmap_and_free_shared_area(sep);
2858 end_function_error:
2859 iounmap(sep->reg_addr);
2861 end_function_free_sep_dev:
2862 pci_dev_put(sep_dev->pdev);
2863 kfree(sep_dev);
2864 sep_dev = NULL;
2866 end_function_disable_device:
2867 pci_disable_device(pdev);
2869 end_function:
2870 return error;
2873 static void sep_remove(struct pci_dev *pdev)
2875 struct sep_device *sep = sep_dev;
2877 /* Unregister from fs */
2878 misc_deregister(&sep->miscdev_sep);
2879 misc_deregister(&sep->miscdev_singleton);
2880 misc_deregister(&sep->miscdev_daemon);
2882 /* Free the irq */
2883 free_irq(sep->pdev->irq, sep);
2885 /* Free the shared area */
2886 sep_unmap_and_free_shared_area(sep_dev);
2887 iounmap((void *) sep_dev->reg_addr);
2890 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
2891 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
2895 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2897 /* Field for registering driver to PCI device */
2898 static struct pci_driver sep_pci_driver = {
2899 .name = "sep_sec_driver",
2900 .id_table = sep_pci_id_tbl,
2901 .probe = sep_probe,
2902 .remove = sep_remove
2907 * sep_init - init function
2909 * Module load time. Register the PCI device driver.
2911 static int __init sep_init(void)
2913 return pci_register_driver(&sep_pci_driver);
2918 * sep_exit - called to unload driver
2920 * Drop the misc devices then remove and unmap the various resources
2921 * that are not released by the driver remove method.
2923 static void __exit sep_exit(void)
2925 pci_unregister_driver(&sep_pci_driver);
2929 module_init(sep_init);
2930 module_exit(sep_exit);
2932 MODULE_LICENSE("GPL");