drivers/crypto/cavium/nitrox/nitrox_mbx.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/workqueue.h>

#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_dev.h"
#define RING_TO_VFNO(_x, _y)	((_x) / (_y))
/**
 * mbx_msg_type - Mailbox message types
 */
enum mbx_msg_type {
	MBX_MSG_TYPE_NOP,
	MBX_MSG_TYPE_REQ,
	MBX_MSG_TYPE_ACK,
	MBX_MSG_TYPE_NACK,
};
/**
 * mbx_msg_opcode - Mailbox message opcodes
 */
enum mbx_msg_opcode {
	MSG_OP_VF_MODE = 1,
	MSG_OP_VF_UP,
	MSG_OP_VF_DOWN,
	MSG_OP_CHIPID_VFID,
	MSG_OP_MCODE_INFO = 11,
};
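
/**
 * struct pf2vf_work - deferred processing of one VF mailbox message
 * @vfdev: VF whose mailbox raised the interrupt
 * @ndev: NITROX PF device
 * @pf2vf_resp: work item run on the pf2vf response workqueue
 */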
struct pf2vf_work {
	struct nitrox_vfdev *vfdev;
	struct nitrox_device *ndev;
	struct work_struct pf2vf_resp;
};
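
/* Read the VF-to-PF mailbox data register for the given ring. */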
static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
{
	u64 reg_addr;

	reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
	return nitrox_read_csr(ndev, reg_addr);
}
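
/* Write a PF response into the PF-to-VF mailbox data register for the given ring. */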
static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
				    int ring)
{
	u64 reg_addr;

	reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
	nitrox_write_csr(ndev, reg_addr, value);
}
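
/*
 * Handle a VF request and acknowledge it: the request message is echoed
 * back with the opcode-specific data filled in and the type set to
 * MBX_MSG_TYPE_ACK. Unknown opcodes are dropped without a reply.
 */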
static void pf2vf_send_response(struct nitrox_device *ndev,
				struct nitrox_vfdev *vfdev)
{
	union mbox_msg msg;

	msg.value = vfdev->msg.value;

	switch (vfdev->msg.opcode) {
	case MSG_OP_VF_MODE:
		msg.data = ndev->mode;
		break;
	case MSG_OP_VF_UP:
		vfdev->nr_queues = vfdev->msg.data;
		atomic_set(&vfdev->state, __NDEV_READY);
		break;
	case MSG_OP_CHIPID_VFID:
		msg.id.chipid = ndev->idx;
		msg.id.vfid = vfdev->vfno;
		break;
	case MSG_OP_VF_DOWN:
		vfdev->nr_queues = 0;
		atomic_set(&vfdev->state, __NDEV_NOT_READY);
		break;
	case MSG_OP_MCODE_INFO:
		msg.data = 0;
		msg.mcode_info.count = 2;
		msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
		msg.mcode_info.next_se_grp = 1;
		msg.mcode_info.next_ae_grp = 1;
		break;
	default:
		msg.type = MBX_MSG_TYPE_NOP;
		break;
	}

	if (msg.type == MBX_MSG_TYPE_NOP)
		return;

	/* send ACK to VF */
	msg.type = MBX_MSG_TYPE_ACK;
	pf2vf_write_mbox(ndev, msg.value, vfdev->ring);

	vfdev->msg.value = 0;
	atomic64_inc(&vfdev->mbx_resp);
}
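
/* Work handler: process one latched VF mailbox message, then free the work item. */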
static void pf2vf_resp_handler(struct work_struct *work)
{
	struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
						     pf2vf_resp);
	struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
	struct nitrox_device *ndev = pf2vf_resp->ndev;

	switch (vfdev->msg.type) {
	case MBX_MSG_TYPE_REQ:
		/* process the request from VF */
		pf2vf_send_response(ndev, vfdev);
		break;
	case MBX_MSG_TYPE_ACK:
	case MBX_MSG_TYPE_NACK:
		break;
	}

	kfree(pf2vf_resp);
}
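
/**
 * nitrox_pf2vf_mbox_handler - PF handler for VF mailbox interrupts
 * @ndev: NITROX PF device
 *
 * Scans the LO (VF 0..63) and HI (VF 64..127) mailbox interrupt registers,
 * latches each pending message into the owning VF's state and queues a
 * work item to process it, then clears the serviced interrupt bit.
 */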
void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
	struct nitrox_vfdev *vfdev;
	struct pf2vf_work *pfwork;
	u64 value, reg_addr;
	u32 i;
	int vfno;

	/* loop for VF(0..63) */
	reg_addr = NPS_PKT_MBOX_INT_LO;
	value = nitrox_read_csr(ndev, reg_addr);
	for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
		/* get the vfno from ring */
		vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
		vfdev = ndev->iov.vfdev + vfno;
		vfdev->ring = i;
		/* fill the vf mailbox data */
		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
		if (!pfwork)
			continue;

		pfwork->vfdev = vfdev;
		pfwork->ndev = ndev;
		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
		/* clear the corresponding vf bit */
		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
	}

	/* loop for VF(64..127) */
	reg_addr = NPS_PKT_MBOX_INT_HI;
	value = nitrox_read_csr(ndev, reg_addr);
	for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
		/* get the vfno from ring */
		vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
		vfdev = ndev->iov.vfdev + vfno;
		vfdev->ring = (i + 64);
		/* fill the vf mailbox data */
		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);

		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
		if (!pfwork)
			continue;

		pfwork->vfdev = vfdev;
		pfwork->ndev = ndev;
		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
		/* clear the corresponding vf bit */
		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
	}
}
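
/**
 * nitrox_mbox_init - set up PF/VF mailbox support
 * @ndev: NITROX PF device
 *
 * Allocates per-VF state and the pf2vf response workqueue, then enables
 * pf2vf mailbox interrupts. Returns 0 on success, -ENOMEM on failure.
 */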
int nitrox_mbox_init(struct nitrox_device *ndev)
{
	struct nitrox_vfdev *vfdev;
	int i;

	ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
				  sizeof(struct nitrox_vfdev), GFP_KERNEL);
	if (!ndev->iov.vfdev)
		return -ENOMEM;

	for (i = 0; i < ndev->iov.num_vfs; i++) {
		vfdev = ndev->iov.vfdev + i;
		vfdev->vfno = i;
	}

	/* allocate pf2vf response workqueue */
	ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
	if (!ndev->iov.pf2vf_wq) {
		kfree(ndev->iov.vfdev);
		return -ENOMEM;
	}
	/* enable pf2vf mailbox interrupts */
	enable_pf2vf_mbox_interrupts(ndev);

	return 0;
}
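
/* Undo nitrox_mbox_init(): disable interrupts, destroy the workqueue, free VF state. */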
void nitrox_mbox_cleanup(struct nitrox_device *ndev)
{
	/* disable pf2vf mailbox interrupts */
	disable_pf2vf_mbox_interrupts(ndev);
	/* destroy workqueue */
	if (ndev->iov.pf2vf_wq)
		destroy_workqueue(ndev->iov.pf2vf_wq);

	kfree(ndev->iov.vfdev);
	ndev->iov.pf2vf_wq = NULL;
	ndev->iov.vfdev = NULL;
}