// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */
6 #define pr_fmt(fmt) "kcs-bmc: " fmt
8 #include <linux/errno.h>
10 #include <linux/ipmi_bmc.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/poll.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
#define DEVICE_NAME     "ipmi-kcs"

/* Size of the host-to-BMC and BMC-to-host message buffers, in bytes. */
#define KCS_MSG_BUFSIZ  1000

/* Dummy byte written to ODR when we only need to clear/settle the channel. */
#define KCS_ZERO_DATA   0

/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
/* Parenthesize the macro argument so expressions expand safely. */
#define KCS_STATUS_STATE(state) ((state) << 6)
#define KCS_STATUS_STATE_MASK   GENMASK(7, 6)
#define KCS_STATUS_CMD_DAT      BIT(3)
#define KCS_STATUS_SMS_ATN      BIT(2)
#define KCS_STATUS_IBF          BIT(1)
#define KCS_STATUS_OBF          BIT(0)

/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
#define IDLE_STATE              0
#define READ_STATE              1
#define WRITE_STATE             2
#define ERROR_STATE             3

/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
#define KCS_CMD_GET_STATUS_ABORT  0x60
#define KCS_CMD_WRITE_START       0x61
#define KCS_CMD_WRITE_END         0x62
#define KCS_CMD_READ_BYTE         0x68
48 static inline u8
read_data(struct kcs_bmc
*kcs_bmc
)
50 return kcs_bmc
->io_inputb(kcs_bmc
, kcs_bmc
->ioreg
.idr
);
53 static inline void write_data(struct kcs_bmc
*kcs_bmc
, u8 data
)
55 kcs_bmc
->io_outputb(kcs_bmc
, kcs_bmc
->ioreg
.odr
, data
);
58 static inline u8
read_status(struct kcs_bmc
*kcs_bmc
)
60 return kcs_bmc
->io_inputb(kcs_bmc
, kcs_bmc
->ioreg
.str
);
63 static inline void write_status(struct kcs_bmc
*kcs_bmc
, u8 data
)
65 kcs_bmc
->io_outputb(kcs_bmc
, kcs_bmc
->ioreg
.str
, data
);
68 static void update_status_bits(struct kcs_bmc
*kcs_bmc
, u8 mask
, u8 val
)
70 u8 tmp
= read_status(kcs_bmc
);
75 write_status(kcs_bmc
, tmp
);
78 static inline void set_state(struct kcs_bmc
*kcs_bmc
, u8 state
)
80 update_status_bits(kcs_bmc
, KCS_STATUS_STATE_MASK
,
81 KCS_STATUS_STATE(state
));
84 static void kcs_force_abort(struct kcs_bmc
*kcs_bmc
)
86 set_state(kcs_bmc
, ERROR_STATE
);
88 write_data(kcs_bmc
, KCS_ZERO_DATA
);
90 kcs_bmc
->phase
= KCS_PHASE_ERROR
;
91 kcs_bmc
->data_in_avail
= false;
92 kcs_bmc
->data_in_idx
= 0;
95 static void kcs_bmc_handle_data(struct kcs_bmc
*kcs_bmc
)
99 switch (kcs_bmc
->phase
) {
100 case KCS_PHASE_WRITE_START
:
101 kcs_bmc
->phase
= KCS_PHASE_WRITE_DATA
;
104 case KCS_PHASE_WRITE_DATA
:
105 if (kcs_bmc
->data_in_idx
< KCS_MSG_BUFSIZ
) {
106 set_state(kcs_bmc
, WRITE_STATE
);
107 write_data(kcs_bmc
, KCS_ZERO_DATA
);
108 kcs_bmc
->data_in
[kcs_bmc
->data_in_idx
++] =
111 kcs_force_abort(kcs_bmc
);
112 kcs_bmc
->error
= KCS_LENGTH_ERROR
;
116 case KCS_PHASE_WRITE_END_CMD
:
117 if (kcs_bmc
->data_in_idx
< KCS_MSG_BUFSIZ
) {
118 set_state(kcs_bmc
, READ_STATE
);
119 kcs_bmc
->data_in
[kcs_bmc
->data_in_idx
++] =
121 kcs_bmc
->phase
= KCS_PHASE_WRITE_DONE
;
122 kcs_bmc
->data_in_avail
= true;
123 wake_up_interruptible(&kcs_bmc
->queue
);
125 kcs_force_abort(kcs_bmc
);
126 kcs_bmc
->error
= KCS_LENGTH_ERROR
;
131 if (kcs_bmc
->data_out_idx
== kcs_bmc
->data_out_len
)
132 set_state(kcs_bmc
, IDLE_STATE
);
134 data
= read_data(kcs_bmc
);
135 if (data
!= KCS_CMD_READ_BYTE
) {
136 set_state(kcs_bmc
, ERROR_STATE
);
137 write_data(kcs_bmc
, KCS_ZERO_DATA
);
141 if (kcs_bmc
->data_out_idx
== kcs_bmc
->data_out_len
) {
142 write_data(kcs_bmc
, KCS_ZERO_DATA
);
143 kcs_bmc
->phase
= KCS_PHASE_IDLE
;
148 kcs_bmc
->data_out
[kcs_bmc
->data_out_idx
++]);
151 case KCS_PHASE_ABORT_ERROR1
:
152 set_state(kcs_bmc
, READ_STATE
);
154 write_data(kcs_bmc
, kcs_bmc
->error
);
155 kcs_bmc
->phase
= KCS_PHASE_ABORT_ERROR2
;
158 case KCS_PHASE_ABORT_ERROR2
:
159 set_state(kcs_bmc
, IDLE_STATE
);
161 write_data(kcs_bmc
, KCS_ZERO_DATA
);
162 kcs_bmc
->phase
= KCS_PHASE_IDLE
;
166 kcs_force_abort(kcs_bmc
);
171 static void kcs_bmc_handle_cmd(struct kcs_bmc
*kcs_bmc
)
175 set_state(kcs_bmc
, WRITE_STATE
);
176 write_data(kcs_bmc
, KCS_ZERO_DATA
);
178 cmd
= read_data(kcs_bmc
);
180 case KCS_CMD_WRITE_START
:
181 kcs_bmc
->phase
= KCS_PHASE_WRITE_START
;
182 kcs_bmc
->error
= KCS_NO_ERROR
;
183 kcs_bmc
->data_in_avail
= false;
184 kcs_bmc
->data_in_idx
= 0;
187 case KCS_CMD_WRITE_END
:
188 if (kcs_bmc
->phase
!= KCS_PHASE_WRITE_DATA
) {
189 kcs_force_abort(kcs_bmc
);
193 kcs_bmc
->phase
= KCS_PHASE_WRITE_END_CMD
;
196 case KCS_CMD_GET_STATUS_ABORT
:
197 if (kcs_bmc
->error
== KCS_NO_ERROR
)
198 kcs_bmc
->error
= KCS_ABORTED_BY_COMMAND
;
200 kcs_bmc
->phase
= KCS_PHASE_ABORT_ERROR1
;
201 kcs_bmc
->data_in_avail
= false;
202 kcs_bmc
->data_in_idx
= 0;
206 kcs_force_abort(kcs_bmc
);
207 kcs_bmc
->error
= KCS_ILLEGAL_CONTROL_CODE
;
212 int kcs_bmc_handle_event(struct kcs_bmc
*kcs_bmc
)
218 spin_lock_irqsave(&kcs_bmc
->lock
, flags
);
220 status
= read_status(kcs_bmc
);
221 if (status
& KCS_STATUS_IBF
) {
222 if (!kcs_bmc
->running
)
223 kcs_force_abort(kcs_bmc
);
224 else if (status
& KCS_STATUS_CMD_DAT
)
225 kcs_bmc_handle_cmd(kcs_bmc
);
227 kcs_bmc_handle_data(kcs_bmc
);
232 spin_unlock_irqrestore(&kcs_bmc
->lock
, flags
);
236 EXPORT_SYMBOL(kcs_bmc_handle_event
);
238 static inline struct kcs_bmc
*to_kcs_bmc(struct file
*filp
)
240 return container_of(filp
->private_data
, struct kcs_bmc
, miscdev
);
243 static int kcs_bmc_open(struct inode
*inode
, struct file
*filp
)
245 struct kcs_bmc
*kcs_bmc
= to_kcs_bmc(filp
);
248 spin_lock_irq(&kcs_bmc
->lock
);
249 if (!kcs_bmc
->running
)
250 kcs_bmc
->running
= 1;
253 spin_unlock_irq(&kcs_bmc
->lock
);
258 static __poll_t
kcs_bmc_poll(struct file
*filp
, poll_table
*wait
)
260 struct kcs_bmc
*kcs_bmc
= to_kcs_bmc(filp
);
263 poll_wait(filp
, &kcs_bmc
->queue
, wait
);
265 spin_lock_irq(&kcs_bmc
->lock
);
266 if (kcs_bmc
->data_in_avail
)
268 spin_unlock_irq(&kcs_bmc
->lock
);
273 static ssize_t
kcs_bmc_read(struct file
*filp
, char __user
*buf
,
274 size_t count
, loff_t
*ppos
)
276 struct kcs_bmc
*kcs_bmc
= to_kcs_bmc(filp
);
281 if (!(filp
->f_flags
& O_NONBLOCK
))
282 wait_event_interruptible(kcs_bmc
->queue
,
283 kcs_bmc
->data_in_avail
);
285 mutex_lock(&kcs_bmc
->mutex
);
287 spin_lock_irq(&kcs_bmc
->lock
);
288 data_avail
= kcs_bmc
->data_in_avail
;
290 data_len
= kcs_bmc
->data_in_idx
;
291 memcpy(kcs_bmc
->kbuffer
, kcs_bmc
->data_in
, data_len
);
293 spin_unlock_irq(&kcs_bmc
->lock
);
300 if (count
< data_len
) {
301 pr_err("channel=%u with too large data : %zu\n",
302 kcs_bmc
->channel
, data_len
);
304 spin_lock_irq(&kcs_bmc
->lock
);
305 kcs_force_abort(kcs_bmc
);
306 spin_unlock_irq(&kcs_bmc
->lock
);
312 if (copy_to_user(buf
, kcs_bmc
->kbuffer
, data_len
)) {
319 spin_lock_irq(&kcs_bmc
->lock
);
320 if (kcs_bmc
->phase
== KCS_PHASE_WRITE_DONE
) {
321 kcs_bmc
->phase
= KCS_PHASE_WAIT_READ
;
322 kcs_bmc
->data_in_avail
= false;
323 kcs_bmc
->data_in_idx
= 0;
327 spin_unlock_irq(&kcs_bmc
->lock
);
330 mutex_unlock(&kcs_bmc
->mutex
);
335 static ssize_t
kcs_bmc_write(struct file
*filp
, const char __user
*buf
,
336 size_t count
, loff_t
*ppos
)
338 struct kcs_bmc
*kcs_bmc
= to_kcs_bmc(filp
);
341 /* a minimum response size '3' : netfn + cmd + ccode */
342 if (count
< 3 || count
> KCS_MSG_BUFSIZ
)
345 mutex_lock(&kcs_bmc
->mutex
);
347 if (copy_from_user(kcs_bmc
->kbuffer
, buf
, count
)) {
352 spin_lock_irq(&kcs_bmc
->lock
);
353 if (kcs_bmc
->phase
== KCS_PHASE_WAIT_READ
) {
354 kcs_bmc
->phase
= KCS_PHASE_READ
;
355 kcs_bmc
->data_out_idx
= 1;
356 kcs_bmc
->data_out_len
= count
;
357 memcpy(kcs_bmc
->data_out
, kcs_bmc
->kbuffer
, count
);
358 write_data(kcs_bmc
, kcs_bmc
->data_out
[0]);
363 spin_unlock_irq(&kcs_bmc
->lock
);
366 mutex_unlock(&kcs_bmc
->mutex
);
371 static long kcs_bmc_ioctl(struct file
*filp
, unsigned int cmd
,
374 struct kcs_bmc
*kcs_bmc
= to_kcs_bmc(filp
);
377 spin_lock_irq(&kcs_bmc
->lock
);
380 case IPMI_BMC_IOCTL_SET_SMS_ATN
:
381 update_status_bits(kcs_bmc
, KCS_STATUS_SMS_ATN
,
385 case IPMI_BMC_IOCTL_CLEAR_SMS_ATN
:
386 update_status_bits(kcs_bmc
, KCS_STATUS_SMS_ATN
,
390 case IPMI_BMC_IOCTL_FORCE_ABORT
:
391 kcs_force_abort(kcs_bmc
);
399 spin_unlock_irq(&kcs_bmc
->lock
);
404 static int kcs_bmc_release(struct inode
*inode
, struct file
*filp
)
406 struct kcs_bmc
*kcs_bmc
= to_kcs_bmc(filp
);
408 spin_lock_irq(&kcs_bmc
->lock
);
409 kcs_bmc
->running
= 0;
410 kcs_force_abort(kcs_bmc
);
411 spin_unlock_irq(&kcs_bmc
->lock
);
416 static const struct file_operations kcs_bmc_fops
= {
417 .owner
= THIS_MODULE
,
418 .open
= kcs_bmc_open
,
419 .read
= kcs_bmc_read
,
420 .write
= kcs_bmc_write
,
421 .release
= kcs_bmc_release
,
422 .poll
= kcs_bmc_poll
,
423 .unlocked_ioctl
= kcs_bmc_ioctl
,
426 struct kcs_bmc
*kcs_bmc_alloc(struct device
*dev
, int sizeof_priv
, u32 channel
)
428 struct kcs_bmc
*kcs_bmc
;
430 kcs_bmc
= devm_kzalloc(dev
, sizeof(*kcs_bmc
) + sizeof_priv
, GFP_KERNEL
);
434 spin_lock_init(&kcs_bmc
->lock
);
435 kcs_bmc
->channel
= channel
;
437 mutex_init(&kcs_bmc
->mutex
);
438 init_waitqueue_head(&kcs_bmc
->queue
);
440 kcs_bmc
->data_in
= devm_kmalloc(dev
, KCS_MSG_BUFSIZ
, GFP_KERNEL
);
441 kcs_bmc
->data_out
= devm_kmalloc(dev
, KCS_MSG_BUFSIZ
, GFP_KERNEL
);
442 kcs_bmc
->kbuffer
= devm_kmalloc(dev
, KCS_MSG_BUFSIZ
, GFP_KERNEL
);
444 kcs_bmc
->miscdev
.minor
= MISC_DYNAMIC_MINOR
;
445 kcs_bmc
->miscdev
.name
= devm_kasprintf(dev
, GFP_KERNEL
, "%s%u",
446 DEVICE_NAME
, channel
);
447 if (!kcs_bmc
->data_in
|| !kcs_bmc
->data_out
|| !kcs_bmc
->kbuffer
||
448 !kcs_bmc
->miscdev
.name
)
450 kcs_bmc
->miscdev
.fops
= &kcs_bmc_fops
;
454 EXPORT_SYMBOL(kcs_bmc_alloc
);
/* Module metadata. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");