// SPDX-License-Identifier: GPL-2.0+
/*
 * IBM Power Systems Virtual Management Channel Support.
 *
 * Copyright (c) 2004, 2018 IBM Corp.
 *   Dave Engebretsen engebret@us.ibm.com
 *   Steven Royer seroyer@linux.vnet.ibm.com
 *   Adam Reznechek adreznec@linux.vnet.ibm.com
 *   Bryant G. Ly <bryantly@linux.vnet.ibm.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/vio.h>

#include "ibmvmc.h"
#define IBMVMC_DRIVER_VERSION "1.0"

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);

static const char ibmvmc_driver_name[] = "ibmvmc";

static struct ibmvmc_struct ibmvmc;
static struct ibmvmc_hmc hmcs[MAX_HMCS];
static struct crq_server_adapter ibmvmc_adapter;

static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;
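/*
 * The three limits above are module parameters (registered as
 * buf_pool_size, max_hmcs and max_mtu at the bottom of this file), so they
 * can be overridden at load time, e.g. with hypothetical values:
 *
 *	modprobe ibmvmc max_mtu=32768 max_hmcs=2 buf_pool_size=32
 *
 * ibmvmc_scrub_module_parms() later clamps them back into the MIN_ and
 * MAX_ bounds from ibmvmc.h before they are used.
 */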
static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
			       u64 dliobn, u64 dlioba)
{
	long rc = 0;

	/* Ensure all writes to source memory are visible before hcall */
	dma_wmb();
	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
		 length, sliobn, slioba, dliobn, dlioba);
	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
				dliobn, dlioba);
	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);

	return rc;
}
static inline void h_free_crq(uint32_t unit_address)
{
	long rc = 0;

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
}
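/*
 * The do/while above is the usual pSeries hcall retry idiom: H_BUSY means
 * retry right away, while the H_IS_LONG_BUSY() return codes carry a hint
 * (decoded by get_longbusy_msecs()) of how long to sleep before retrying.
 * h_request_vmc() below follows the same pattern.
 */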
/**
 * h_request_vmc: - request a hypervisor virtual management channel device
 * @vmc_index: drc index of the vmc device created
 *
 * Requests the hypervisor create a new virtual management channel device,
 * allowing this partition to send hypervisor virtualization control
 * commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static inline long h_request_vmc(u32 *vmc_index)
{
	long rc = 0;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		/* Call to request the VMC device from phyp */
		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
		*vmc_index = retbuf[0];
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	return rc;
}
/* routines for managing a command/response queue */
/**
 * ibmvmc_handle_event: - Interrupt handler for crq events
 * @irq:	number of irq to handle, not used
 * @dev_instance: crq_server_adapter that received interrupt
 *
 * Disables interrupts and schedules ibmvmc_task
 *
 * Always returns IRQ_HANDLED
 */
static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)dev_instance;

	vio_disable_interrupts(to_vio_dev(adapter->dev));
	tasklet_schedule(&adapter->work_task);

	return IRQ_HANDLED;
}
/**
 * ibmvmc_release_crq_queue - Release CRQ Queue
 *
 * @adapter:	crq_server_adapter struct
 *
 * Frees the interrupt, stops the reset thread, closes the CRQ and unmaps
 * and frees the queue page.
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);

	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);

	h_free_crq(vdev->unit_address);
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}
/**
 * ibmvmc_reset_crq_queue - Reset CRQ Queue
 *
 * @adapter:	crq_server_adapter struct
 *
 * This function calls h_free_crq and then calls H_REG_CRQ and does all the
 * bookkeeping to get us back to where we can communicate.
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;

	/* Close the CRQ */
	h_free_crq(vdev->unit_address);

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* Adapter is good, but other end is not ready */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}
/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:	crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
	struct ibmvmc_crq_msg *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		dma_rmb();
	} else {
		crq = NULL;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}
/**
 * ibmvmc_send_crq - Send CRQ
 *
 * @adapter:	crq_server_adapter struct
 * @word1:	Word1 Data field
 * @word2:	Word2 Data field
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
			    u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	long rc = 0;

	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
		vdev->unit_address, word1, word2);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the other side to prevent it from fetching any stale data.
	 */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);

	return rc;
}
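/*
 * Illustrative sketch (not driver code): callers below build a 16-byte CRQ
 * message on the stack and feed it to ibmvmc_send_crq() as two 64-bit
 * words, along the lines of:
 *
 *	struct ibmvmc_crq_msg msg = { .valid = 0x80, .type = VMC_MSG_CLOSE };
 *	__be64 *as_u64 = (__be64 *)&msg;
 *
 *	ibmvmc_send_crq(adapter, be64_to_cpu(as_u64[0]),
 *			be64_to_cpu(as_u64[1]));
 *
 * The message bytes already sit in big-endian wire order in memory; the
 * be64_to_cpu() conversions make the register values handed to the
 * hypervisor reproduce that byte order in the partner's queue on
 * little-endian kernels as well.
 */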
/**
 * alloc_dma_buffer - Create DMA Buffer
 *
 * @vdev:	vio_dev struct
 * @size:	Size field
 * @dma_handle:	DMA address field
 *
 * Allocates memory for the command queue and maps remote memory into an
 * ioba.
 *
 * Returns a pointer to the buffer
 */
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	/* allocate memory */
	void *buffer = kzalloc(size, GFP_KERNEL);

	if (!buffer) {
		*dma_handle = 0;
		return NULL;
	}

	/* DMA map */
	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		*dma_handle = 0;
		kzfree(buffer);
		return NULL;
	}

	return buffer;
}
/**
 * free_dma_buffer - Free DMA Buffer
 *
 * @vdev:	vio_dev struct
 * @size:	Size field
 * @vaddr:	Address field
 * @dma_handle:	DMA address field
 *
 * Releases memory for a command queue and unmaps mapped remote memory.
 */
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* unmap */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* deallocate memory */
	kzfree(vaddr);
}
/**
 * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
 *
 * @hmc_index:	HMC Index Field
 *
 * Return:
 *	Pointer to ibmvmc_buffer
 */
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index)
		return NULL;

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid && buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}
/**
 * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
 *
 * @adapter:	crq_server_adapter struct
 * @hmc_index:	Hmc Index field
 *
 * Return:
 *	Pointer to ibmvmc_buffer
 */
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
							u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
			 hmc_index);
		return NULL;
	}

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}
/**
 * ibmvmc_free_hmc_buffer - Free an HMC Buffer
 *
 * @hmc:	ibmvmc_hmc struct
 * @buffer:	ibmvmc_buffer struct
 *
 * Marks the buffer free under the HMC lock so it can be handed out again.
 */
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
				   struct ibmvmc_buffer *buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&hmc->lock, flags);
	buffer->free = 1;
	spin_unlock_irqrestore(&hmc->lock, flags);
}
/**
 * ibmvmc_count_hmc_buffers - Count HMC Buffers
 *
 * @hmc_index:	HMC Index field
 * @valid:	Valid number of buffers field
 * @free:	Free number of buffers field
 */
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
				     unsigned int *free)
{
	struct ibmvmc_buffer *buffer;
	unsigned long i;
	unsigned long flags;

	if (hmc_index > ibmvmc.max_hmc_index)
		return;

	if (!valid || !free)
		return;

	*valid = 0; *free = 0;

	buffer = hmcs[hmc_index].buffer;
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			*valid = *valid + 1;
			if (buffer[i].free)
				*free = *free + 1;
		}
	}

	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
}
/**
 * ibmvmc_get_free_hmc - Get Free HMC
 *
 * Return:
 *	Pointer to an available HMC Connection
 *	Null otherwise
 */
static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
{
	unsigned long i;
	unsigned long flags;

	/*
	 * Find an available HMC connection.
	 */
	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
		spin_lock_irqsave(&hmcs[i].lock, flags);
		if (hmcs[i].state == ibmhmc_state_free) {
			hmcs[i].index = i;
			hmcs[i].state = ibmhmc_state_initial;
			spin_unlock_irqrestore(&hmcs[i].lock, flags);
			return &hmcs[i];
		}
		spin_unlock_irqrestore(&hmcs[i].lock, flags);
	}

	return NULL;
}
/**
 * ibmvmc_return_hmc - Return an HMC Connection
 *
 * @hmc:		ibmvmc_hmc struct
 * @release_readers:	if true, wake up any readers attached to this session
 *
 * This function releases the HMC connections back into the pool.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	spin_lock_irqsave(&hmc->lock, flags);
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
	buffer = hmc->buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			free_dma_buffer(vdev,
					ibmvmc.max_mtu,
					buffer[i].real_addr_local,
					buffer[i].dma_addr_local);
			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
		}
		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));

		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
	}

	spin_unlock_irqrestore(&hmc->lock, flags);

	return 0;
}
/**
 * ibmvmc_send_open - Interface Open
 * @buffer: Pointer to ibmvmc_buffer struct
 * @hmc: Pointer to ibmvmc_hmc struct
 *
 * This command is sent by the management partition as the result of a
 * management partition device request. It causes the hypervisor to
 * prepare a set of data buffers for the management application connection
 * indicated by the HMC idx. A unique HMC idx would be used if multiple
 * management applications running concurrently were desired. Before
 * responding to this command, the hypervisor must provide the management
 * partition with at least one of these new buffers via the Add Buffer
 * message. This indicates whether the messages are inbound or outbound
 * from the hypervisor.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
			    struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
		(unsigned long)buffer->dma_addr_local,
		(unsigned long)adapter->riobn,
		(unsigned long)buffer->dma_addr_remote);
	rc = h_copy_rdma(buffer->size,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
			rc);
		return -EIO;
	}

	hmc->state = ibmhmc_state_opening;

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_OPEN;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}
/**
 * ibmvmc_send_close - Interface Close
 * @hmc: Pointer to ibmvmc_hmc struct
 *
 * This command is sent by the management partition to terminate a
 * management application to hypervisor connection. When this command is
 * sent, the management partition has quiesced all I/O operations to all
 * buffers associated with this management application connection, and
 * has freed any storage for these buffers.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_info(adapter->dev, "CRQ send: close\n");

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CLOSE;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.rsvd = 0;
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}
/**
 * ibmvmc_send_capabilities - Send VMC Capabilities
 *
 * @adapter:	crq_server_adapter struct
 *
 * The capabilities message is an administrative message sent after the CRQ
 * initialization sequence of messages and is used to exchange VMC capabilities
 * between the management partition and the hypervisor. The management
 * partition must send this message and the hypervisor must respond with the
 * VMC capabilities Response message before HMC interface messages can begin.
 * Any HMC interface messages received before the exchange of capabilities has
 * completed are dropped.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
{
	struct ibmvmc_admin_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CAP;
	crq_msg.status = 0;
	crq_msg.rsvd[0] = 0;
	crq_msg.rsvd[1] = 0;
	crq_msg.max_hmc = ibmvmc_max_hmcs;
	crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
	crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
	crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	ibmvmc.state = ibmvmc_state_capabilities;

	return 0;
}
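/*
 * For reference, the administrative handshake the code above and
 * ibmvmc_handle_crq_init() below walk through before any HMC traffic flows
 * (state names abbreviated from the ibmvmc_state_* values):
 *
 *	partition			hypervisor
 *	---------			----------
 *	CRQ init (0xC0/0x01)	------>
 *				<------	CRQ init rsp (0xC0/0x02)
 *	VMC_MSG_CAP		------>		[state: capabilities]
 *				<------	VMC_MSG_CAP_RESP
 *						[state: ready]
 */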
/**
 * ibmvmc_send_add_buffer_resp - Add Buffer Response
 *
 * @adapter:	crq_server_adapter struct
 * @status:	Status field
 * @hmc_session: HMC Session field
 * @hmc_index:	HMC Index field
 * @buffer_id:	Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Add Buffer message. The Status field indicates the result
 * of the command.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_ADD_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}
/**
 * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
 *
 * @adapter:	crq_server_adapter struct
 * @status:	Status field
 * @hmc_session: HMC Session field
 * @hmc_index:	HMC Index field
 * @buffer_id:	Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Remove Buffer message. The Buffer ID field indicates
 * which buffer the management partition selected to remove. The Status
 * field indicates the result of the command.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_REM_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}
/**
 * ibmvmc_send_msg - Signal Message
 *
 * @adapter:	crq_server_adapter struct
 * @buffer:	ibmvmc_buffer struct
 * @hmc:	ibmvmc_hmc struct
 * @msg_len:	message length field
 *
 * This command is sent between the management partition and the hypervisor
 * in order to signal the arrival of an HMC protocol message. The command
 * can be sent by both the management partition and the hypervisor. It is
 * used for all traffic between the management application and the hypervisor,
 * regardless of who initiated the communication.
 *
 * There is no response to this message.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_buffer *buffer,
			   struct ibmvmc_hmc *hmc, int msg_len)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
	rc = h_copy_rdma(msg_len,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
			rc);
		return rc;
	}

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_SIGNAL;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));

	buffer->owner = VMC_BUF_OWNER_HV;
	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}
/**
 * ibmvmc_open - Open Session
 *
 * @inode:	inode struct
 * @file:	file struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_open(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;

	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)inode, (unsigned long)file,
		 ibmvmc.state);

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->file = file;
	file->private_data = session;

	return 0;
}
/**
 * ibmvmc_close - Close Session
 *
 * @inode:	inode struct
 * @file:	file struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_close(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	int rc = 0;
	unsigned long flags;

	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)file, ibmvmc.state);

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (hmc) {
		if (!hmc->adapter)
			return -EIO;

		if (ibmvmc.state == ibmvmc_state_failed) {
			dev_warn(hmc->adapter->dev, "close: state_failed\n");
			return -EIO;
		}

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->state >= ibmhmc_state_opening) {
			rc = ibmvmc_send_close(hmc);
			if (rc)
				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
		}
		spin_unlock_irqrestore(&hmc->lock, flags);
	}

	kzfree(session);

	return rc;
}
/**
 * ibmvmc_read - Read
 *
 * @file:	file struct
 * @buf:	Character buffer
 * @nbytes:	Size in bytes
 * @ppos:	Offset
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	struct crq_server_adapter *adapter;
	struct ibmvmc_buffer *buffer;
	ssize_t n;
	ssize_t retval = 0;
	unsigned long flags;
	DEFINE_WAIT(wait);

	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
		 (unsigned long)file, (unsigned long)buf,
		 (unsigned long)nbytes);

	if (nbytes == 0)
		return 0;

	if (nbytes > ibmvmc.max_mtu) {
		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
			(unsigned int)nbytes);
		return -EINVAL;
	}

	session = file->private_data;
	if (!session) {
		pr_warn("ibmvmc: read: no session\n");
		return -EIO;
	}

	hmc = session->hmc;
	if (!hmc) {
		pr_warn("ibmvmc: read: no hmc\n");
		return -EIO;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		pr_warn("ibmvmc: read: no adapter\n");
		return -EIO;
	}

	do {
		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->queue_tail != hmc->queue_head)
			/* Data is available */
			break;

		spin_unlock_irqrestore(&hmc->lock, flags);

		if (!session->valid) {
			retval = -EBADFD;
			goto out;
		}
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		}

		schedule();

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
	} while (1);

	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
	hmc->queue_tail++;
	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
		hmc->queue_tail = 0;
	spin_unlock_irqrestore(&hmc->lock, flags);

	nbytes = min_t(size_t, nbytes, buffer->msg_len);
	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
	ibmvmc_free_hmc_buffer(hmc, buffer);
	retval = nbytes;

	if (n) {
		dev_warn(adapter->dev, "read: copy to user failed.\n");
		retval = -EFAULT;
	}

out:
	finish_wait(&ibmvmc_read_wait, &wait);
	dev_dbg(adapter->dev, "read: out %ld\n", retval);
	return retval;
}
/**
 * ibmvmc_poll - Poll
 *
 * @file:	file struct
 * @wait:	Poll Table
 *
 * Return:
 *	poll.h return values
 */
static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	unsigned int mask = 0;

	session = file->private_data;
	if (!session)
		return 0;

	hmc = session->hmc;
	if (!hmc)
		return 0;

	poll_wait(file, &ibmvmc_read_wait, wait);

	if (hmc->queue_head != hmc->queue_tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
/**
 * ibmvmc_write - Write
 *
 * @file:	file struct
 * @buffer:	Character buffer
 * @count:	Count field
 * @ppos:	Offset
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
			    size_t count, loff_t *ppos)
{
	struct ibmvmc_buffer *vmc_buffer;
	struct ibmvmc_file_session *session;
	struct crq_server_adapter *adapter;
	struct ibmvmc_hmc *hmc;
	unsigned char *buf;
	unsigned long flags;
	size_t bytes;
	const char *p = buffer;
	size_t c = count;
	int ret = 0;

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (!hmc)
		return -EIO;

	spin_lock_irqsave(&hmc->lock, flags);
	if (hmc->state == ibmhmc_state_free) {
		/* HMC connection is not valid (possibly was reset under us). */
		ret = -EIO;
		goto out;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		ret = -EIO;
		goto out;
	}

	if (count > ibmvmc.max_mtu) {
		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
			 (unsigned long)count);
		ret = -EIO;
		goto out;
	}

	/* Waiting for the open resp message to the ioctl(1) - retry */
	if (hmc->state == ibmhmc_state_opening) {
		ret = -EBUSY;
		goto out;
	}

	/* Make sure the ioctl() was called & the open msg sent, and that
	 * the HMC connection has not failed.
	 */
	if (hmc->state != ibmhmc_state_ready) {
		ret = -EIO;
		goto out;
	}

	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	if (!vmc_buffer) {
		/* No buffer available for the msg send, or we have not yet
		 * completed the open/open_resp sequence. Retry until this is
		 * complete.
		 */
		ret = -EBUSY;
		goto out;
	}
	if (!vmc_buffer->real_addr_local) {
		dev_err(adapter->dev, "no buffer storage assigned\n");
		ret = -EIO;
		goto out;
	}
	buf = vmc_buffer->real_addr_local;

	while (c > 0) {
		bytes = min_t(size_t, c, vmc_buffer->size);

		bytes -= copy_from_user(buf, p, bytes);
		if (!bytes) {
			ret = -EFAULT;
			goto out;
		}
		c -= bytes;
		p += bytes;
	}
	if (p == buffer)
		goto out;

	file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
	mark_inode_dirty(file->f_path.dentry->d_inode);

	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
		(unsigned long)file, (unsigned long)count);

	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
	ret = p - buffer;
out:
	spin_unlock_irqrestore(&hmc->lock, flags);
	return (ssize_t)(ret);
}
/**
 * ibmvmc_setup_hmc - Setup the HMC
 *
 * @session:	ibmvmc_file_session struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
{
	struct ibmvmc_hmc *hmc;
	unsigned int valid, free, index;

	if (ibmvmc.state == ibmvmc_state_failed) {
		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
		return -EIO;
	}

	if (ibmvmc.state < ibmvmc_state_ready) {
		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
		return -EAGAIN;
	}

	/* Device is busy until capabilities have been exchanged and we
	 * have a generic buffer for each possible HMC connection.
	 */
	for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
		valid = 0;
		ibmvmc_count_hmc_buffers(index, &valid, &free);
		if (valid == 0) {
			pr_warn("ibmvmc: buffers not ready for index %d\n",
				index);
			return -ENOBUFS;
		}
	}

	/* Get an hmc object, and transition to ibmhmc_state_initial */
	hmc = ibmvmc_get_free_hmc();
	if (!hmc) {
		pr_warn("%s: free hmc not found\n", __func__);
		return -EBUSY;
	}

	hmc->session = hmc->session + 1;
	if (hmc->session == 0xff)
		hmc->session = 1;

	session->hmc = hmc;
	hmc->adapter = &ibmvmc_adapter;
	hmc->file_session = session;
	session->valid = 1;

	return 0;
}
/**
 * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
 *
 * @session:	ibmvmc_file_session struct
 * @new_hmc_id:	HMC id field
 *
 * IOCTL command to setup the hmc id
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
				  unsigned char __user *new_hmc_id)
{
	struct ibmvmc_hmc *hmc;
	struct ibmvmc_buffer *buffer;
	size_t bytes;
	char print_buffer[HMC_ID_LEN + 1];
	unsigned long flags;
	long rc = 0;

	/* Reserve HMC session */
	hmc = session->hmc;
	if (!hmc) {
		rc = ibmvmc_setup_hmc(session);
		if (rc)
			return rc;

		hmc = session->hmc;
		if (!hmc) {
			pr_err("ibmvmc: setup_hmc success but no hmc\n");
			return -EIO;
		}
	}

	if (hmc->state != ibmhmc_state_initial) {
		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
			hmc->state);
		return -EIO;
	}

	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
	if (bytes)
		return -EFAULT;

	/* Send Open Session command */
	spin_lock_irqsave(&hmc->lock, flags);
	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	spin_unlock_irqrestore(&hmc->lock, flags);

	if (!buffer || !buffer->real_addr_local) {
		pr_warn("ibmvmc: sethmcid: no buffer available\n");
		return -EIO;
	}

	/* Make sure buffer is NULL terminated before trying to print it */
	memset(print_buffer, 0, HMC_ID_LEN + 1);
	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);

	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
	/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
	rc = ibmvmc_send_open(buffer, hmc);

	return rc;
}
/**
 * ibmvmc_ioctl_query - IOCTL Query
 *
 * @session:	ibmvmc_file_session struct
 * @ret_struct:	ibmvmc_query_struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
			       struct ibmvmc_query_struct __user *ret_struct)
{
	struct ibmvmc_query_struct query_struct;
	size_t bytes;

	memset(&query_struct, 0, sizeof(query_struct));
	query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
	query_struct.state = ibmvmc.state;
	query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;

	bytes = copy_to_user(ret_struct, &query_struct,
			     sizeof(query_struct));
	if (bytes)
		return -EFAULT;

	return 0;
}
/**
 * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
 *
 * @session:	ibmvmc_file_session struct
 * @ret_vmc_index:	VMC Index
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
				    u32 __user *ret_vmc_index)
{
	/* TODO: (adreznec) Add locking to control multiple process access */
	size_t bytes;
	long rc;
	u32 vmc_drc_index;

	/* Call to request the VMC device from phyp */
	rc = h_request_vmc(&vmc_drc_index);
	pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);

	if (rc == H_SUCCESS) {
		rc = 0;
	} else if (rc == H_FUNCTION) {
		pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
		return -EPERM;
	} else if (rc == H_AUTHORITY) {
		pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
		return -EPERM;
	} else if (rc == H_HARDWARE) {
		pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
		return -EIO;
	} else if (rc == H_RESOURCE) {
		pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
		return -ENODEV;
	} else if (rc == H_NOT_AVAILABLE) {
		pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
		return -EPERM;
	} else if (rc == H_PARAMETER) {
		pr_err("ibmvmc: requestvmc: invalid parameter\n");
		return -EINVAL;
	}

	/* Success, set the vmc index in global struct */
	ibmvmc.vmc_drc_index = vmc_drc_index;

	bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
			     sizeof(*ret_vmc_index));
	if (bytes) {
		pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
		return -EFAULT;
	}
	return rc;
}
/**
 * ibmvmc_ioctl - IOCTL
 *
 * @file:	file struct
 * @cmd:	cmd field
 * @arg:	Argument field
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct ibmvmc_file_session *session = file->private_data;

	pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
		 (unsigned long)file, cmd, arg,
		 (unsigned long)session);

	if (!session) {
		pr_warn("ibmvmc: ioctl: no session\n");
		return -EIO;
	}

	switch (cmd) {
	case VMC_IOCTL_SETHMCID:
		return ibmvmc_ioctl_sethmcid(session,
			(unsigned char __user *)arg);
	case VMC_IOCTL_QUERY:
		return ibmvmc_ioctl_query(session,
			(struct ibmvmc_query_struct __user *)arg);
	case VMC_IOCTL_REQUESTVMC:
		return ibmvmc_ioctl_requestvmc(session,
			(unsigned int __user *)arg);
	default:
		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
		return -EINVAL;
	}
}
static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open		= ibmvmc_open,
	.release	= ibmvmc_close,
};
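/*
 * Hypothetical userspace sketch (not part of this driver): a management
 * application would typically drive the interface defined above roughly as
 * follows, assuming the misc device node appears as /dev/ibmvmc and the
 * ioctl definitions come from ibmvmc.h:
 *
 *	int fd = open("/dev/ibmvmc", O_RDWR);
 *	struct ibmvmc_query_struct q;
 *
 *	ioctl(fd, VMC_IOCTL_QUERY, &q);		// have_vmc / state check
 *	ioctl(fd, VMC_IOCTL_SETHMCID, hmc_id);	// reserve HMC, send open
 *	write(fd, req, req_len);		// send one HMC command
 *	read(fd, resp, sizeof(resp));		// blocking read of the reply
 */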
/**
 * ibmvmc_add_buffer - Add Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message transfers a buffer from hypervisor ownership to management
 * partition ownership. The LIOBA is obtained from the virtual TCE table
 * associated with the hypervisor side of the VMC device, and points to a
 * buffer of size MTU (as established in the capabilities exchange).
 *
 * Typical flow for adding buffers:
 * 1. A new management application connection is opened by the management
 *	partition.
 * 2. The hypervisor assigns new buffers for the traffic associated with
 *	that connection.
 * 3. The hypervisor sends VMC Add Buffer messages to the management
 *	partition, informing it of the new buffers.
 * 4. The hypervisor sends an HMC protocol message (to the management
 *	application) notifying it of the new buffers. This informs the
 *	application that it has buffers available for sending HMC
 *	commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = &hmcs[hmc_index].buffer[buffer_id];

	if (buffer->real_addr_local || buffer->dma_addr_local) {
		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
			 (unsigned long)buffer_id);
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
						   ibmvmc.max_mtu,
						   &buffer->dma_addr_local);

	if (!buffer->real_addr_local) {
		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
	buffer->size = ibmvmc.max_mtu;
	buffer->owner = crq->var1.owner;
	buffer->free = 1;
	/* Must ensure valid==1 is observable only after all other fields are */
	dma_wmb();
	buffer->valid = 1;
	buffer->id = buffer_id;

	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
	dev_dbg(adapter->dev, "   index: %d, session: %d, buffer: 0x%x, owner: %d\n",
		hmc_index, hmc_session, buffer_id, buffer->owner);
	dev_dbg(adapter->dev, "   local: 0x%x, remote: 0x%x\n",
		(u32)buffer->dma_addr_local,
		(u32)buffer->dma_addr_remote);
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}
/**
 * ibmvmc_rem_buffer - Remove Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message requests an HMC buffer to be transferred from management
 * partition ownership to hypervisor ownership. The management partition may
 * not be able to satisfy the request at a particular point in time if all its
 * buffers are in use. The management partition requires a depth of at least
 * one inbound buffer to allow management application commands to flow to the
 * hypervisor. It is, therefore, an interface error for the hypervisor to
 * attempt to remove the management partition's last buffer.
 *
 * The hypervisor is expected to manage buffer usage with the management
 * application directly and inform the management partition when buffers may be
 * removed. The typical flow for removing buffers:
 *
 * 1. The management application no longer needs a communication path to a
 *	particular hypervisor function. That function is closed.
 * 2. The hypervisor and the management application quiesce all traffic to that
 *	function. The hypervisor requests a reduction in buffer pool size.
 * 3. The management application acknowledges the reduction in buffer pool size.
 * 4. The hypervisor sends a Remove Buffer message to the management partition,
 *	informing it of the reduction in buffers.
 * 5. The management partition verifies it can remove the buffer. This is
 *	possible if buffers have been quiesced.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/*
 * The hypervisor requested that we pick an unused buffer, and return it.
 * Before sending the buffer back, we free any storage associated with the
 * buffer.
 */
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id = 0;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
			 hmc_index);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
	if (!buffer) {
		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
					    hmc_session, hmc_index,
					    VMC_INVALID_BUFFER_ID);
		return -1;
	}

	buffer_id = buffer->id;

	if (buffer->valid)
		free_dma_buffer(to_vio_dev(adapter->dev),
				ibmvmc.max_mtu,
				buffer->real_addr_local,
				buffer->dma_addr_local);

	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	buffer->msg_len = msg_len;
	buffer->free = 0;
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}
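/*
 * Sketch of the per-HMC outbound queue that couples ibmvmc_recv_msg()
 * (producer, advances queue_head) to ibmvmc_read() (consumer, advances
 * queue_tail); both run under hmc->lock and indices wrap at
 * ibmvmc_max_buf_pool_size:
 *
 *	queue_outbound_msgs:  [ id2 ][ id7 ][ ... ]
 *	                        ^queue_tail   ^queue_head
 *
 * head == tail means "empty" in this scheme, so queue_head catching up to
 * queue_tail after an insert indicates an overrun - hence the "outbound
 * buffer queue wrapped" error above.
 */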
/**
 * ibmvmc_process_capabilities - Process Capabilities
 *
 * @adapter:	crq_server_adapter struct
 * @crqp:	ibmvmc_crq_msg struct
 */
static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
					struct ibmvmc_crq_msg *crqp)
{
	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;

	if ((be16_to_cpu(crq->version) >> 8) !=
	    (IBMVMC_PROTOCOL_VERSION >> 8)) {
		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
			be16_to_cpu(crq->version),
			IBMVMC_PROTOCOL_VERSION);
		ibmvmc.state = ibmvmc_state_failed;
		return;
	}

	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
					    be16_to_cpu(crq->pool_size));
	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
	ibmvmc.state = ibmvmc_state_ready;

	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
		 ibmvmc.max_hmc_index);
}
/**
 * ibmvmc_validate_hmc_session - Validate HMC Session
 *
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
				       struct ibmvmc_crq_msg *crq)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;

	if (crq->hmc_session == 0)
		return 0;

	if (hmc_index > ibmvmc.max_hmc_index)
		return -1;

	if (hmcs[hmc_index].session != crq->hmc_session) {
		dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
			 hmcs[hmc_index].session, crq->hmc_session);
		return -1;
	}

	return 0;
}
/**
 * ibmvmc_reset - Reset
 *
 * @adapter:	crq_server_adapter struct
 * @xport_event: if true, the partner closed their CRQ and no CRQ reset needs
 *		 to be scheduled; if false, a CRQ reset is scheduled.
 *
 * Closes all HMC sessions and conditionally schedules a CRQ reset.
 */
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
	int i;

	if (ibmvmc.state != ibmvmc_state_sched_reset) {
		dev_info(adapter->dev, "*** Reset to initial state.\n");
		for (i = 0; i < ibmvmc_max_hmcs; i++)
			ibmvmc_return_hmc(&hmcs[i], xport_event);

		if (xport_event) {
			/* CRQ was closed by the partner. We don't need to do
			 * anything except set ourself to the correct state to
			 * handle init msgs.
			 */
			ibmvmc.state = ibmvmc_state_crqinit;
		} else {
			/* The partner did not close their CRQ - instead, we're
			 * closing the CRQ on our end. Need to schedule this
			 * for process context, because CRQ reset may require a
			 * sleep.
			 *
			 * Setting ibmvmc.state here immediately prevents
			 * ibmvmc_open from completing until the reset
			 * completes in process context.
			 */
			ibmvmc.state = ibmvmc_state_sched_reset;
			dev_dbg(adapter->dev, "Device reset scheduled");
			wake_up_interruptible(&adapter->reset_wait_queue);
		}
	}
}
/**
 * ibmvmc_reset_task - Reset Task
 *
 * @data:	crq_server_adapter struct
 *
 * Performs a CRQ reset of the VMC device in process context.
 * NOTE: This function should not be called directly, use ibmvmc_reset.
 */
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);

		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}
/**
 * ibmvmc_process_open_resp - Process Open Response
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the Interface
 * Open message. When this message is received, the indicated buffer is
 * again available for management partition use.
 */
static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
				     struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;
	unsigned short buffer_id;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		/* Why would PHYP give an index > max negotiated? */
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_return_hmc(&hmcs[hmc_index], false);
		return;
	}

	if (hmcs[hmc_index].state == ibmhmc_state_opening) {
		buffer_id = be16_to_cpu(crq->var2.buffer_id);
		if (buffer_id >= ibmvmc.max_buffer_pool_size) {
			dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
				buffer_id);
			hmcs[hmc_index].state = ibmhmc_state_failed;
		} else {
			ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
					       &hmcs[hmc_index].buffer[buffer_id]);
			hmcs[hmc_index].state = ibmhmc_state_ready;
			dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
		}
	} else {
		dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
			 hmcs[hmc_index].state);
	}
}
/**
 * ibmvmc_process_close_resp - Process Close Response
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the management
 * application Interface Close message.
 *
 * If the close fails, simply reset the entire driver as the state of the VMC
 * must be in tough shape.
 */
static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
				      struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_reset(adapter, false);
		return;
	}

	ibmvmc_return_hmc(&hmcs[hmc_index], false);
}
/**
 * ibmvmc_crq_process - Process CRQ
 *
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * Process the CRQ message based upon the type of message received.
 */
static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
			       struct ibmvmc_crq_msg *crq)
{
	switch (crq->type) {
	case VMC_MSG_CAP_RESP:
		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
			crq->type);
		if (ibmvmc.state == ibmvmc_state_capabilities)
			ibmvmc_process_capabilities(adapter, crq);
		else
			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
				 ibmvmc.state);
		break;
	case VMC_MSG_OPEN_RESP:
		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_open_resp(crq, adapter);
		break;
	case VMC_MSG_ADD_BUF:
		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_add_buffer(adapter, crq);
		break;
	case VMC_MSG_REM_BUF:
		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_rem_buffer(adapter, crq);
		break;
	case VMC_MSG_SIGNAL:
		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_recv_msg(adapter, crq);
		break;
	case VMC_MSG_CLOSE_RESP:
		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_close_resp(crq, adapter);
		break;
	case VMC_MSG_ADD_BUF_RESP:
	case VMC_MSG_REM_BUF_RESP:
		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
			 crq->type);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
			 crq->type);
		break;
	}
}
/**
 * ibmvmc_handle_crq_init - Handle CRQ Init
 *
 * @crq:	ibmvmc_crq_msg struct
 * @adapter:	crq_server_adapter struct
 *
 * Handle the type of crq initialization based on whether
 * it is a message or a response.
 */
static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
				   struct crq_server_adapter *adapter)
{
	switch (crq->type) {
	case 0x01:	/* Initialization message */
		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit) {
			/* Send back a response */
			if (ibmvmc_send_crq(adapter, 0xC002000000000000,
					    0) == 0)
				ibmvmc_send_capabilities(adapter);
			else
				dev_err(adapter->dev, " Unable to send init rsp\n");
		} else {
			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
				ibmvmc.state, ibmvmc.max_mtu);
		}

		break;
	case 0x02:	/* Initialization response */
		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit)
			ibmvmc_send_capabilities(adapter);
		break;
	default:
		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
			 (unsigned long)crq->type);
	}
}
/**
 * ibmvmc_handle_crq - Handle CRQ
 *
 * @crq:	ibmvmc_crq_msg struct
 * @adapter:	crq_server_adapter struct
 *
 * Read the command elements from the command queue and execute the
 * requests based upon the type of crq message.
 */
static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
			      struct crq_server_adapter *adapter)
{
	switch (crq->valid) {
	case 0xC0:	/* initialization */
		ibmvmc_handle_crq_init(crq, adapter);
		break;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
		ibmvmc_reset(adapter, true);
		break;
	case 0x80:	/* real payload */
		ibmvmc_crq_process(adapter, crq);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
			 crq->valid);
		break;
	}
}
)
2066 struct crq_server_adapter
*adapter
=
2067 (struct crq_server_adapter
*)data
;
2068 struct vio_dev
*vdev
= to_vio_dev(adapter
->dev
);
2069 struct ibmvmc_crq_msg
*crq
;
2073 /* Pull all the valid messages off the CRQ */
2074 while ((crq
= crq_queue_next_crq(&adapter
->queue
)) != NULL
) {
2075 ibmvmc_handle_crq(crq
, adapter
);
2077 /* CRQ reset was requested, stop processing CRQs.
2078 * Interrupts will be re-enabled by the reset task.
2080 if (ibmvmc
.state
== ibmvmc_state_sched_reset
)
2084 vio_enable_interrupts(vdev
);
2085 crq
= crq_queue_next_crq(&adapter
->queue
);
2087 vio_disable_interrupts(vdev
);
2088 ibmvmc_handle_crq(crq
, adapter
);
2090 /* CRQ reset was requested, stop processing CRQs.
2091 * Interrupts will be re-enabled by the reset task.
2093 if (ibmvmc
.state
== ibmvmc_state_sched_reset
)
/**
 * ibmvmc_init_crq_queue - Init CRQ Queue
 *
 * @adapter:	crq_server_adapter struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;
	int retrc = 0;

	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;

	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(adapter->dev, queue->msg_token))
		goto map_failed;

	retrc = plpar_hcall_norets(H_REG_CRQ,
				   vdev->unit_address,
				   queue->msg_token, PAGE_SIZE);
	rc = retrc;

	if (rc == H_RESOURCE)
		rc = ibmvmc_reset_crq_queue(adapter);

	if (rc == 2) {
		dev_warn(adapter->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);

	if (request_irq(vdev->irq,
			ibmvmc_handle_event,
			0, "ibmvmc", (void *)adapter) != 0) {
		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

req_irq_failed:
	/* Cannot have any work since we either never got our IRQ registered,
	 * or never got interrupts enabled
	 */
	tasklet_kill(&adapter->work_task);
	h_free_crq(vdev->unit_address);
reg_crq_failed:
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -ENOMEM;
}
/* Fill in the liobn and riobn fields on the adapter */
static int read_dma_window(struct vio_dev *vdev,
			   struct crq_server_adapter *adapter)
{
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows
	 */
	dma_window =
		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
						  NULL);
	if (!dma_window) {
		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	adapter->liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	adapter->riobn = be32_to_cpu(*dma_window);

	return 0;
}
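/*
 * Illustrative layout of the "ibm,my-dma-window" property parsed above,
 * assuming one address cell and one size cell per window (the actual cell
 * counts come from the ibm,#dma-address-cells and ibm,#dma-size-cells
 * properties):
 *
 *	< liobn addr size liobn addr size >
 *	  ^first window   ^second window
 *	  adapter->liobn  adapter->riobn
 */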
static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message. Note that this is allowed
	 * to fail if the other end is not active. In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}
static int ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);

	return 0;
}
static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);
static struct vio_driver ibmvmc_driver = {
	.name		= ibmvmc_driver_name,
	.id_table	= ibmvmc_device_table,
	.probe		= ibmvmc_probe,
	.remove		= ibmvmc_remove,
};
static void __init ibmvmc_scrub_module_parms(void)
{
	if (ibmvmc_max_mtu > MAX_MTU) {
		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
		ibmvmc_max_mtu = MAX_MTU;
	} else if (ibmvmc_max_mtu < MIN_MTU) {
		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
		ibmvmc_max_mtu = MIN_MTU;
	}

	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
			MAX_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
			MIN_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
	}

	if (ibmvmc_max_hmcs > MAX_HMCS) {
		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
		ibmvmc_max_hmcs = MAX_HMCS;
	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
		ibmvmc_max_hmcs = MIN_HMCS;
	}
}
static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};
static int __init ibmvmc_module_init(void)
{
	int rc, i, j;

	ibmvmc.state = ibmvmc_state_initial;
	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);

	rc = misc_register(&ibmvmc_miscdev);
	if (rc) {
		pr_err("ibmvmc: misc registration failed\n");
		goto misc_register_failed;
	}
	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
		ibmvmc_miscdev.minor);

	/* Initialize data structures */
	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
	for (i = 0; i < MAX_HMCS; i++) {
		spin_lock_init(&hmcs[i].lock);
		hmcs[i].state = ibmhmc_state_free;
		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
	}

	/* Sanity check module parms */
	ibmvmc_scrub_module_parms();

	/*
	 * Initialize some reasonable values. Might be negotiated smaller
	 * values during the capabilities exchange.
	 */
	ibmvmc.max_mtu = ibmvmc_max_mtu;
	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;

	rc = vio_register_driver(&ibmvmc_driver);
	if (rc) {
		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
		goto vio_reg_failed;
	}

	return 0;

vio_reg_failed:
	misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
	return rc;
}
static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}
module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);
module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");
MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");