// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */
8 #include <linux/errno.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/kmod.h>
13 #include <linux/ktime.h>
14 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/types.h>
18 #include <linux/uaccess.h>
19 #include <linux/version.h>
21 #include <media/cec-pin.h>
23 #include "cec-pin-priv.h"
25 static inline struct cec_devnode
*cec_devnode_data(struct file
*filp
)
27 struct cec_fh
*fh
= filp
->private_data
;
29 return &fh
->adap
->devnode
;
32 /* CEC file operations */
34 static __poll_t
cec_poll(struct file
*filp
,
35 struct poll_table_struct
*poll
)
37 struct cec_fh
*fh
= filp
->private_data
;
38 struct cec_adapter
*adap
= fh
->adap
;
41 poll_wait(filp
, &fh
->wait
, poll
);
42 if (!cec_is_registered(adap
))
43 return EPOLLERR
| EPOLLHUP
;
44 mutex_lock(&adap
->lock
);
45 if (adap
->is_configured
&&
46 adap
->transmit_queue_sz
< CEC_MAX_MSG_TX_QUEUE_SZ
)
47 res
|= EPOLLOUT
| EPOLLWRNORM
;
49 res
|= EPOLLIN
| EPOLLRDNORM
;
50 if (fh
->total_queued_events
)
52 mutex_unlock(&adap
->lock
);
56 static bool cec_is_busy(const struct cec_adapter
*adap
,
57 const struct cec_fh
*fh
)
59 bool valid_initiator
= adap
->cec_initiator
&& adap
->cec_initiator
== fh
;
60 bool valid_follower
= adap
->cec_follower
&& adap
->cec_follower
== fh
;
63 * Exclusive initiators and followers can always access the CEC adapter
65 if (valid_initiator
|| valid_follower
)
68 * All others can only access the CEC adapter if there is no
69 * exclusive initiator and they are in INITIATOR mode.
71 return adap
->cec_initiator
||
72 fh
->mode_initiator
== CEC_MODE_NO_INITIATOR
;
75 static long cec_adap_g_caps(struct cec_adapter
*adap
,
76 struct cec_caps __user
*parg
)
78 struct cec_caps caps
= {};
80 strscpy(caps
.driver
, adap
->devnode
.dev
.parent
->driver
->name
,
82 strscpy(caps
.name
, adap
->name
, sizeof(caps
.name
));
83 caps
.available_log_addrs
= adap
->available_log_addrs
;
84 caps
.capabilities
= adap
->capabilities
;
85 caps
.version
= LINUX_VERSION_CODE
;
86 if (copy_to_user(parg
, &caps
, sizeof(caps
)))
91 static long cec_adap_g_phys_addr(struct cec_adapter
*adap
,
96 mutex_lock(&adap
->lock
);
97 phys_addr
= adap
->phys_addr
;
98 mutex_unlock(&adap
->lock
);
99 if (copy_to_user(parg
, &phys_addr
, sizeof(phys_addr
)))
104 static int cec_validate_phys_addr(u16 phys_addr
)
108 if (phys_addr
== CEC_PHYS_ADDR_INVALID
)
110 for (i
= 0; i
< 16; i
+= 4)
111 if (phys_addr
& (0xf << i
))
115 for (i
+= 4; i
< 16; i
+= 4)
116 if ((phys_addr
& (0xf << i
)) == 0)
121 static long cec_adap_s_phys_addr(struct cec_adapter
*adap
, struct cec_fh
*fh
,
122 bool block
, __u16 __user
*parg
)
127 if (!(adap
->capabilities
& CEC_CAP_PHYS_ADDR
))
129 if (copy_from_user(&phys_addr
, parg
, sizeof(phys_addr
)))
132 err
= cec_validate_phys_addr(phys_addr
);
135 mutex_lock(&adap
->lock
);
136 if (cec_is_busy(adap
, fh
))
139 __cec_s_phys_addr(adap
, phys_addr
, block
);
140 mutex_unlock(&adap
->lock
);
144 static long cec_adap_g_log_addrs(struct cec_adapter
*adap
,
145 struct cec_log_addrs __user
*parg
)
147 struct cec_log_addrs log_addrs
;
149 mutex_lock(&adap
->lock
);
150 log_addrs
= adap
->log_addrs
;
151 if (!adap
->is_configured
)
152 memset(log_addrs
.log_addr
, CEC_LOG_ADDR_INVALID
,
153 sizeof(log_addrs
.log_addr
));
154 mutex_unlock(&adap
->lock
);
156 if (copy_to_user(parg
, &log_addrs
, sizeof(log_addrs
)))
161 static long cec_adap_s_log_addrs(struct cec_adapter
*adap
, struct cec_fh
*fh
,
162 bool block
, struct cec_log_addrs __user
*parg
)
164 struct cec_log_addrs log_addrs
;
167 if (!(adap
->capabilities
& CEC_CAP_LOG_ADDRS
))
169 if (copy_from_user(&log_addrs
, parg
, sizeof(log_addrs
)))
171 log_addrs
.flags
&= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK
|
172 CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU
|
173 CEC_LOG_ADDRS_FL_CDC_ONLY
;
174 mutex_lock(&adap
->lock
);
175 if (!adap
->is_configuring
&&
176 (!log_addrs
.num_log_addrs
|| !adap
->is_configured
) &&
177 !cec_is_busy(adap
, fh
)) {
178 err
= __cec_s_log_addrs(adap
, &log_addrs
, block
);
180 log_addrs
= adap
->log_addrs
;
182 mutex_unlock(&adap
->lock
);
185 if (copy_to_user(parg
, &log_addrs
, sizeof(log_addrs
)))
190 static long cec_adap_g_connector_info(struct cec_adapter
*adap
,
191 struct cec_log_addrs __user
*parg
)
195 if (!(adap
->capabilities
& CEC_CAP_CONNECTOR_INFO
))
198 mutex_lock(&adap
->lock
);
199 if (copy_to_user(parg
, &adap
->conn_info
, sizeof(adap
->conn_info
)))
201 mutex_unlock(&adap
->lock
);
205 static long cec_transmit(struct cec_adapter
*adap
, struct cec_fh
*fh
,
206 bool block
, struct cec_msg __user
*parg
)
208 struct cec_msg msg
= {};
211 if (!(adap
->capabilities
& CEC_CAP_TRANSMIT
))
213 if (copy_from_user(&msg
, parg
, sizeof(msg
)))
216 mutex_lock(&adap
->lock
);
217 if (adap
->log_addrs
.num_log_addrs
== 0)
219 else if (adap
->is_configuring
)
221 else if (cec_is_busy(adap
, fh
))
224 err
= cec_transmit_msg_fh(adap
, &msg
, fh
, block
);
225 mutex_unlock(&adap
->lock
);
228 if (copy_to_user(parg
, &msg
, sizeof(msg
)))
233 /* Called by CEC_RECEIVE: wait for a message to arrive */
234 static int cec_receive_msg(struct cec_fh
*fh
, struct cec_msg
*msg
, bool block
)
236 u32 timeout
= msg
->timeout
;
240 mutex_lock(&fh
->lock
);
241 /* Are there received messages queued up? */
242 if (fh
->queued_msgs
) {
243 /* Yes, return the first one */
244 struct cec_msg_entry
*entry
=
245 list_first_entry(&fh
->msgs
,
246 struct cec_msg_entry
, list
);
248 list_del(&entry
->list
);
252 mutex_unlock(&fh
->lock
);
253 /* restore original timeout value */
254 msg
->timeout
= timeout
;
258 /* No, return EAGAIN in non-blocking mode or wait */
259 mutex_unlock(&fh
->lock
);
261 /* Return when in non-blocking mode */
266 /* The user specified a timeout */
267 res
= wait_event_interruptible_timeout(fh
->wait
,
269 msecs_to_jiffies(msg
->timeout
));
275 /* Wait indefinitely */
276 res
= wait_event_interruptible(fh
->wait
,
279 /* Exit on error, otherwise loop to get the new message */
284 static long cec_receive(struct cec_adapter
*adap
, struct cec_fh
*fh
,
285 bool block
, struct cec_msg __user
*parg
)
287 struct cec_msg msg
= {};
290 if (copy_from_user(&msg
, parg
, sizeof(msg
)))
293 err
= cec_receive_msg(fh
, &msg
, block
);
297 if (copy_to_user(parg
, &msg
, sizeof(msg
)))
302 static long cec_dqevent(struct cec_adapter
*adap
, struct cec_fh
*fh
,
303 bool block
, struct cec_event __user
*parg
)
305 struct cec_event_entry
*ev
= NULL
;
311 mutex_lock(&fh
->lock
);
312 while (!fh
->total_queued_events
&& block
) {
313 mutex_unlock(&fh
->lock
);
314 err
= wait_event_interruptible(fh
->wait
,
315 fh
->total_queued_events
);
318 mutex_lock(&fh
->lock
);
321 /* Find the oldest event */
322 for (i
= 0; i
< CEC_NUM_EVENTS
; i
++) {
323 struct cec_event_entry
*entry
=
324 list_first_entry_or_null(&fh
->events
[i
],
325 struct cec_event_entry
, list
);
327 if (entry
&& entry
->ev
.ts
<= ts
) {
340 if (copy_to_user(parg
, &ev
->ev
, sizeof(ev
->ev
)))
342 if (ev_idx
>= CEC_NUM_CORE_EVENTS
)
344 fh
->queued_events
[ev_idx
]--;
345 fh
->total_queued_events
--;
348 mutex_unlock(&fh
->lock
);
352 static long cec_g_mode(struct cec_adapter
*adap
, struct cec_fh
*fh
,
355 u32 mode
= fh
->mode_initiator
| fh
->mode_follower
;
357 if (copy_to_user(parg
, &mode
, sizeof(mode
)))
362 static long cec_s_mode(struct cec_adapter
*adap
, struct cec_fh
*fh
,
368 bool send_pin_event
= false;
371 if (copy_from_user(&mode
, parg
, sizeof(mode
)))
373 if (mode
& ~(CEC_MODE_INITIATOR_MSK
| CEC_MODE_FOLLOWER_MSK
)) {
374 dprintk(1, "%s: invalid mode bits set\n", __func__
);
378 mode_initiator
= mode
& CEC_MODE_INITIATOR_MSK
;
379 mode_follower
= mode
& CEC_MODE_FOLLOWER_MSK
;
381 if (mode_initiator
> CEC_MODE_EXCL_INITIATOR
||
382 mode_follower
> CEC_MODE_MONITOR_ALL
) {
383 dprintk(1, "%s: unknown mode\n", __func__
);
387 if (mode_follower
== CEC_MODE_MONITOR_ALL
&&
388 !(adap
->capabilities
& CEC_CAP_MONITOR_ALL
)) {
389 dprintk(1, "%s: MONITOR_ALL not supported\n", __func__
);
393 if (mode_follower
== CEC_MODE_MONITOR_PIN
&&
394 !(adap
->capabilities
& CEC_CAP_MONITOR_PIN
)) {
395 dprintk(1, "%s: MONITOR_PIN not supported\n", __func__
);
399 /* Follower modes should always be able to send CEC messages */
400 if ((mode_initiator
== CEC_MODE_NO_INITIATOR
||
401 !(adap
->capabilities
& CEC_CAP_TRANSMIT
)) &&
402 mode_follower
>= CEC_MODE_FOLLOWER
&&
403 mode_follower
<= CEC_MODE_EXCL_FOLLOWER_PASSTHRU
) {
404 dprintk(1, "%s: cannot transmit\n", __func__
);
408 /* Monitor modes require CEC_MODE_NO_INITIATOR */
409 if (mode_initiator
&& mode_follower
>= CEC_MODE_MONITOR_PIN
) {
410 dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
415 /* Monitor modes require CAP_NET_ADMIN */
416 if (mode_follower
>= CEC_MODE_MONITOR_PIN
&& !capable(CAP_NET_ADMIN
))
419 mutex_lock(&adap
->lock
);
421 * You can't become exclusive follower if someone else already
424 if ((mode_follower
== CEC_MODE_EXCL_FOLLOWER
||
425 mode_follower
== CEC_MODE_EXCL_FOLLOWER_PASSTHRU
) &&
426 adap
->cec_follower
&& adap
->cec_follower
!= fh
)
429 * You can't become exclusive initiator if someone else already
432 if (mode_initiator
== CEC_MODE_EXCL_INITIATOR
&&
433 adap
->cec_initiator
&& adap
->cec_initiator
!= fh
)
437 bool old_mon_all
= fh
->mode_follower
== CEC_MODE_MONITOR_ALL
;
438 bool new_mon_all
= mode_follower
== CEC_MODE_MONITOR_ALL
;
440 if (old_mon_all
!= new_mon_all
) {
442 err
= cec_monitor_all_cnt_inc(adap
);
444 cec_monitor_all_cnt_dec(adap
);
449 bool old_mon_pin
= fh
->mode_follower
== CEC_MODE_MONITOR_PIN
;
450 bool new_mon_pin
= mode_follower
== CEC_MODE_MONITOR_PIN
;
452 if (old_mon_pin
!= new_mon_pin
) {
453 send_pin_event
= new_mon_pin
;
455 err
= cec_monitor_pin_cnt_inc(adap
);
457 cec_monitor_pin_cnt_dec(adap
);
462 mutex_unlock(&adap
->lock
);
466 if (fh
->mode_follower
== CEC_MODE_FOLLOWER
)
467 adap
->follower_cnt
--;
468 if (mode_follower
== CEC_MODE_FOLLOWER
)
469 adap
->follower_cnt
++;
470 if (send_pin_event
) {
471 struct cec_event ev
= {
472 .flags
= CEC_EVENT_FL_INITIAL_STATE
,
475 ev
.event
= adap
->cec_pin_is_high
? CEC_EVENT_PIN_CEC_HIGH
:
476 CEC_EVENT_PIN_CEC_LOW
;
477 cec_queue_event_fh(fh
, &ev
, 0);
479 if (mode_follower
== CEC_MODE_EXCL_FOLLOWER
||
480 mode_follower
== CEC_MODE_EXCL_FOLLOWER_PASSTHRU
) {
482 mode_follower
== CEC_MODE_EXCL_FOLLOWER_PASSTHRU
;
483 adap
->cec_follower
= fh
;
484 } else if (adap
->cec_follower
== fh
) {
485 adap
->passthrough
= false;
486 adap
->cec_follower
= NULL
;
488 if (mode_initiator
== CEC_MODE_EXCL_INITIATOR
)
489 adap
->cec_initiator
= fh
;
490 else if (adap
->cec_initiator
== fh
)
491 adap
->cec_initiator
= NULL
;
492 fh
->mode_initiator
= mode_initiator
;
493 fh
->mode_follower
= mode_follower
;
494 mutex_unlock(&adap
->lock
);
498 static long cec_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
500 struct cec_fh
*fh
= filp
->private_data
;
501 struct cec_adapter
*adap
= fh
->adap
;
502 bool block
= !(filp
->f_flags
& O_NONBLOCK
);
503 void __user
*parg
= (void __user
*)arg
;
505 if (!cec_is_registered(adap
))
509 case CEC_ADAP_G_CAPS
:
510 return cec_adap_g_caps(adap
, parg
);
512 case CEC_ADAP_G_PHYS_ADDR
:
513 return cec_adap_g_phys_addr(adap
, parg
);
515 case CEC_ADAP_S_PHYS_ADDR
:
516 return cec_adap_s_phys_addr(adap
, fh
, block
, parg
);
518 case CEC_ADAP_G_LOG_ADDRS
:
519 return cec_adap_g_log_addrs(adap
, parg
);
521 case CEC_ADAP_S_LOG_ADDRS
:
522 return cec_adap_s_log_addrs(adap
, fh
, block
, parg
);
524 case CEC_ADAP_G_CONNECTOR_INFO
:
525 return cec_adap_g_connector_info(adap
, parg
);
528 return cec_transmit(adap
, fh
, block
, parg
);
531 return cec_receive(adap
, fh
, block
, parg
);
534 return cec_dqevent(adap
, fh
, block
, parg
);
537 return cec_g_mode(adap
, fh
, parg
);
540 return cec_s_mode(adap
, fh
, parg
);
547 static int cec_open(struct inode
*inode
, struct file
*filp
)
549 struct cec_devnode
*devnode
=
550 container_of(inode
->i_cdev
, struct cec_devnode
, cdev
);
551 struct cec_adapter
*adap
= to_cec_adapter(devnode
);
552 struct cec_fh
*fh
= kzalloc(sizeof(*fh
), GFP_KERNEL
);
554 * Initial events that are automatically sent when the cec device is
557 struct cec_event ev
= {
558 .event
= CEC_EVENT_STATE_CHANGE
,
559 .flags
= CEC_EVENT_FL_INITIAL_STATE
,
567 INIT_LIST_HEAD(&fh
->msgs
);
568 INIT_LIST_HEAD(&fh
->xfer_list
);
569 for (i
= 0; i
< CEC_NUM_EVENTS
; i
++)
570 INIT_LIST_HEAD(&fh
->events
[i
]);
571 mutex_init(&fh
->lock
);
572 init_waitqueue_head(&fh
->wait
);
574 fh
->mode_initiator
= CEC_MODE_INITIATOR
;
577 err
= cec_get_device(devnode
);
583 mutex_lock(&devnode
->lock
);
584 if (list_empty(&devnode
->fhs
) &&
586 adap
->phys_addr
== CEC_PHYS_ADDR_INVALID
) {
587 err
= adap
->ops
->adap_enable(adap
, true);
589 mutex_unlock(&devnode
->lock
);
594 filp
->private_data
= fh
;
596 /* Queue up initial state events */
597 ev
.state_change
.phys_addr
= adap
->phys_addr
;
598 ev
.state_change
.log_addr_mask
= adap
->log_addrs
.log_addr_mask
;
599 ev
.state_change
.have_conn_info
=
600 adap
->conn_info
.type
!= CEC_CONNECTOR_TYPE_NO_CONNECTOR
;
601 cec_queue_event_fh(fh
, &ev
, 0);
602 #ifdef CONFIG_CEC_PIN
603 if (adap
->pin
&& adap
->pin
->ops
->read_hpd
) {
604 err
= adap
->pin
->ops
->read_hpd(adap
);
606 ev
.event
= err
? CEC_EVENT_PIN_HPD_HIGH
:
607 CEC_EVENT_PIN_HPD_LOW
;
608 cec_queue_event_fh(fh
, &ev
, 0);
611 if (adap
->pin
&& adap
->pin
->ops
->read_5v
) {
612 err
= adap
->pin
->ops
->read_5v(adap
);
614 ev
.event
= err
? CEC_EVENT_PIN_5V_HIGH
:
615 CEC_EVENT_PIN_5V_LOW
;
616 cec_queue_event_fh(fh
, &ev
, 0);
621 list_add(&fh
->list
, &devnode
->fhs
);
622 mutex_unlock(&devnode
->lock
);
627 /* Override for the release function */
628 static int cec_release(struct inode
*inode
, struct file
*filp
)
630 struct cec_devnode
*devnode
= cec_devnode_data(filp
);
631 struct cec_adapter
*adap
= to_cec_adapter(devnode
);
632 struct cec_fh
*fh
= filp
->private_data
;
635 mutex_lock(&adap
->lock
);
636 if (adap
->cec_initiator
== fh
)
637 adap
->cec_initiator
= NULL
;
638 if (adap
->cec_follower
== fh
) {
639 adap
->cec_follower
= NULL
;
640 adap
->passthrough
= false;
642 if (fh
->mode_follower
== CEC_MODE_FOLLOWER
)
643 adap
->follower_cnt
--;
644 if (fh
->mode_follower
== CEC_MODE_MONITOR_PIN
)
645 cec_monitor_pin_cnt_dec(adap
);
646 if (fh
->mode_follower
== CEC_MODE_MONITOR_ALL
)
647 cec_monitor_all_cnt_dec(adap
);
648 mutex_unlock(&adap
->lock
);
650 mutex_lock(&devnode
->lock
);
652 if (cec_is_registered(adap
) && list_empty(&devnode
->fhs
) &&
653 !adap
->needs_hpd
&& adap
->phys_addr
== CEC_PHYS_ADDR_INVALID
) {
654 WARN_ON(adap
->ops
->adap_enable(adap
, false));
656 mutex_unlock(&devnode
->lock
);
658 /* Unhook pending transmits from this filehandle. */
659 mutex_lock(&adap
->lock
);
660 while (!list_empty(&fh
->xfer_list
)) {
661 struct cec_data
*data
=
662 list_first_entry(&fh
->xfer_list
, struct cec_data
, xfer_list
);
664 data
->blocking
= false;
666 list_del(&data
->xfer_list
);
668 mutex_unlock(&adap
->lock
);
669 while (!list_empty(&fh
->msgs
)) {
670 struct cec_msg_entry
*entry
=
671 list_first_entry(&fh
->msgs
, struct cec_msg_entry
, list
);
673 list_del(&entry
->list
);
676 for (i
= CEC_NUM_CORE_EVENTS
; i
< CEC_NUM_EVENTS
; i
++) {
677 while (!list_empty(&fh
->events
[i
])) {
678 struct cec_event_entry
*entry
=
679 list_first_entry(&fh
->events
[i
],
680 struct cec_event_entry
, list
);
682 list_del(&entry
->list
);
688 cec_put_device(devnode
);
689 filp
->private_data
= NULL
;
693 const struct file_operations cec_devnode_fops
= {
694 .owner
= THIS_MODULE
,
696 .unlocked_ioctl
= cec_ioctl
,
697 .compat_ioctl
= cec_ioctl
,
698 .release
= cec_release
,