/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include "cec-priv.h"
35 static inline struct cec_devnode
*cec_devnode_data(struct file
*filp
)
37 struct cec_fh
*fh
= filp
->private_data
;
39 return &fh
->adap
->devnode
;
/* CEC file operations */
44 static unsigned int cec_poll(struct file
*filp
,
45 struct poll_table_struct
*poll
)
47 struct cec_devnode
*devnode
= cec_devnode_data(filp
);
48 struct cec_fh
*fh
= filp
->private_data
;
49 struct cec_adapter
*adap
= fh
->adap
;
52 if (!devnode
->registered
)
53 return POLLERR
| POLLHUP
;
54 mutex_lock(&adap
->lock
);
55 if (adap
->is_configured
&&
56 adap
->transmit_queue_sz
< CEC_MAX_MSG_TX_QUEUE_SZ
)
57 res
|= POLLOUT
| POLLWRNORM
;
59 res
|= POLLIN
| POLLRDNORM
;
60 if (fh
->pending_events
)
62 poll_wait(filp
, &fh
->wait
, poll
);
63 mutex_unlock(&adap
->lock
);
67 static bool cec_is_busy(const struct cec_adapter
*adap
,
68 const struct cec_fh
*fh
)
70 bool valid_initiator
= adap
->cec_initiator
&& adap
->cec_initiator
== fh
;
71 bool valid_follower
= adap
->cec_follower
&& adap
->cec_follower
== fh
;
74 * Exclusive initiators and followers can always access the CEC adapter
76 if (valid_initiator
|| valid_follower
)
79 * All others can only access the CEC adapter if there is no
80 * exclusive initiator and they are in INITIATOR mode.
82 return adap
->cec_initiator
||
83 fh
->mode_initiator
== CEC_MODE_NO_INITIATOR
;
86 static long cec_adap_g_caps(struct cec_adapter
*adap
,
87 struct cec_caps __user
*parg
)
89 struct cec_caps caps
= {};
91 strlcpy(caps
.driver
, adap
->devnode
.dev
.parent
->driver
->name
,
93 strlcpy(caps
.name
, adap
->name
, sizeof(caps
.name
));
94 caps
.available_log_addrs
= adap
->available_log_addrs
;
95 caps
.capabilities
= adap
->capabilities
;
96 caps
.version
= LINUX_VERSION_CODE
;
97 if (copy_to_user(parg
, &caps
, sizeof(caps
)))
102 static long cec_adap_g_phys_addr(struct cec_adapter
*adap
,
107 mutex_lock(&adap
->lock
);
108 phys_addr
= adap
->phys_addr
;
109 mutex_unlock(&adap
->lock
);
110 if (copy_to_user(parg
, &phys_addr
, sizeof(phys_addr
)))
115 static long cec_adap_s_phys_addr(struct cec_adapter
*adap
, struct cec_fh
*fh
,
116 bool block
, __u16 __user
*parg
)
121 if (!(adap
->capabilities
& CEC_CAP_PHYS_ADDR
))
123 if (copy_from_user(&phys_addr
, parg
, sizeof(phys_addr
)))
126 err
= cec_phys_addr_validate(phys_addr
, NULL
, NULL
);
129 mutex_lock(&adap
->lock
);
130 if (cec_is_busy(adap
, fh
))
133 __cec_s_phys_addr(adap
, phys_addr
, block
);
134 mutex_unlock(&adap
->lock
);
138 static long cec_adap_g_log_addrs(struct cec_adapter
*adap
,
139 struct cec_log_addrs __user
*parg
)
141 struct cec_log_addrs log_addrs
;
143 mutex_lock(&adap
->lock
);
144 log_addrs
= adap
->log_addrs
;
145 if (!adap
->is_configured
)
146 memset(log_addrs
.log_addr
, CEC_LOG_ADDR_INVALID
,
147 sizeof(log_addrs
.log_addr
));
148 mutex_unlock(&adap
->lock
);
150 if (copy_to_user(parg
, &log_addrs
, sizeof(log_addrs
)))
155 static long cec_adap_s_log_addrs(struct cec_adapter
*adap
, struct cec_fh
*fh
,
156 bool block
, struct cec_log_addrs __user
*parg
)
158 struct cec_log_addrs log_addrs
;
161 if (!(adap
->capabilities
& CEC_CAP_LOG_ADDRS
))
163 if (copy_from_user(&log_addrs
, parg
, sizeof(log_addrs
)))
165 log_addrs
.flags
&= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK
|
166 CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU
|
167 CEC_LOG_ADDRS_FL_CDC_ONLY
;
168 mutex_lock(&adap
->lock
);
169 if (!adap
->is_configuring
&&
170 (!log_addrs
.num_log_addrs
|| !adap
->is_configured
) &&
171 !cec_is_busy(adap
, fh
)) {
172 err
= __cec_s_log_addrs(adap
, &log_addrs
, block
);
174 log_addrs
= adap
->log_addrs
;
176 mutex_unlock(&adap
->lock
);
179 if (copy_to_user(parg
, &log_addrs
, sizeof(log_addrs
)))
184 static long cec_transmit(struct cec_adapter
*adap
, struct cec_fh
*fh
,
185 bool block
, struct cec_msg __user
*parg
)
187 struct cec_msg msg
= {};
190 if (!(adap
->capabilities
& CEC_CAP_TRANSMIT
))
192 if (copy_from_user(&msg
, parg
, sizeof(msg
)))
195 /* A CDC-Only device can only send CDC messages */
196 if ((adap
->log_addrs
.flags
& CEC_LOG_ADDRS_FL_CDC_ONLY
) &&
197 (msg
.len
== 1 || msg
.msg
[1] != CEC_MSG_CDC_MESSAGE
))
200 mutex_lock(&adap
->lock
);
201 if (!adap
->is_configured
)
203 else if (cec_is_busy(adap
, fh
))
206 err
= cec_transmit_msg_fh(adap
, &msg
, fh
, block
);
207 mutex_unlock(&adap
->lock
);
210 if (copy_to_user(parg
, &msg
, sizeof(msg
)))
215 /* Called by CEC_RECEIVE: wait for a message to arrive */
216 static int cec_receive_msg(struct cec_fh
*fh
, struct cec_msg
*msg
, bool block
)
218 u32 timeout
= msg
->timeout
;
222 mutex_lock(&fh
->lock
);
223 /* Are there received messages queued up? */
224 if (fh
->queued_msgs
) {
225 /* Yes, return the first one */
226 struct cec_msg_entry
*entry
=
227 list_first_entry(&fh
->msgs
,
228 struct cec_msg_entry
, list
);
230 list_del(&entry
->list
);
234 mutex_unlock(&fh
->lock
);
235 /* restore original timeout value */
236 msg
->timeout
= timeout
;
240 /* No, return EAGAIN in non-blocking mode or wait */
241 mutex_unlock(&fh
->lock
);
243 /* Return when in non-blocking mode */
248 /* The user specified a timeout */
249 res
= wait_event_interruptible_timeout(fh
->wait
,
251 msecs_to_jiffies(msg
->timeout
));
257 /* Wait indefinitely */
258 res
= wait_event_interruptible(fh
->wait
,
261 /* Exit on error, otherwise loop to get the new message */
266 static long cec_receive(struct cec_adapter
*adap
, struct cec_fh
*fh
,
267 bool block
, struct cec_msg __user
*parg
)
269 struct cec_msg msg
= {};
272 if (copy_from_user(&msg
, parg
, sizeof(msg
)))
274 mutex_lock(&adap
->lock
);
275 if (!adap
->is_configured
&& fh
->mode_follower
< CEC_MODE_MONITOR
)
277 mutex_unlock(&adap
->lock
);
281 err
= cec_receive_msg(fh
, &msg
, block
);
285 if (copy_to_user(parg
, &msg
, sizeof(msg
)))
290 static long cec_dqevent(struct cec_adapter
*adap
, struct cec_fh
*fh
,
291 bool block
, struct cec_event __user
*parg
)
293 struct cec_event
*ev
= NULL
;
298 mutex_lock(&fh
->lock
);
299 while (!fh
->pending_events
&& block
) {
300 mutex_unlock(&fh
->lock
);
301 err
= wait_event_interruptible(fh
->wait
, fh
->pending_events
);
304 mutex_lock(&fh
->lock
);
307 /* Find the oldest event */
308 for (i
= 0; i
< CEC_NUM_EVENTS
; i
++) {
309 if (fh
->pending_events
& (1 << (i
+ 1)) &&
310 fh
->events
[i
].ts
<= ts
) {
320 if (copy_to_user(parg
, ev
, sizeof(*ev
))) {
325 fh
->pending_events
&= ~(1 << ev
->event
);
328 mutex_unlock(&fh
->lock
);
332 static long cec_g_mode(struct cec_adapter
*adap
, struct cec_fh
*fh
,
335 u32 mode
= fh
->mode_initiator
| fh
->mode_follower
;
337 if (copy_to_user(parg
, &mode
, sizeof(mode
)))
342 static long cec_s_mode(struct cec_adapter
*adap
, struct cec_fh
*fh
,
350 if (copy_from_user(&mode
, parg
, sizeof(mode
)))
352 if (mode
& ~(CEC_MODE_INITIATOR_MSK
| CEC_MODE_FOLLOWER_MSK
))
355 mode_initiator
= mode
& CEC_MODE_INITIATOR_MSK
;
356 mode_follower
= mode
& CEC_MODE_FOLLOWER_MSK
;
358 if (mode_initiator
> CEC_MODE_EXCL_INITIATOR
||
359 mode_follower
> CEC_MODE_MONITOR_ALL
)
362 if (mode_follower
== CEC_MODE_MONITOR_ALL
&&
363 !(adap
->capabilities
& CEC_CAP_MONITOR_ALL
))
366 /* Follower modes should always be able to send CEC messages */
367 if ((mode_initiator
== CEC_MODE_NO_INITIATOR
||
368 !(adap
->capabilities
& CEC_CAP_TRANSMIT
)) &&
369 mode_follower
>= CEC_MODE_FOLLOWER
&&
370 mode_follower
<= CEC_MODE_EXCL_FOLLOWER_PASSTHRU
)
373 /* Monitor modes require CEC_MODE_NO_INITIATOR */
374 if (mode_initiator
&& mode_follower
>= CEC_MODE_MONITOR
)
377 /* Monitor modes require CAP_NET_ADMIN */
378 if (mode_follower
>= CEC_MODE_MONITOR
&& !capable(CAP_NET_ADMIN
))
381 mutex_lock(&adap
->lock
);
383 * You can't become exclusive follower if someone else already
386 if ((mode_follower
== CEC_MODE_EXCL_FOLLOWER
||
387 mode_follower
== CEC_MODE_EXCL_FOLLOWER_PASSTHRU
) &&
388 adap
->cec_follower
&& adap
->cec_follower
!= fh
)
391 * You can't become exclusive initiator if someone else already
394 if (mode_initiator
== CEC_MODE_EXCL_INITIATOR
&&
395 adap
->cec_initiator
&& adap
->cec_initiator
!= fh
)
399 bool old_mon_all
= fh
->mode_follower
== CEC_MODE_MONITOR_ALL
;
400 bool new_mon_all
= mode_follower
== CEC_MODE_MONITOR_ALL
;
402 if (old_mon_all
!= new_mon_all
) {
404 err
= cec_monitor_all_cnt_inc(adap
);
406 cec_monitor_all_cnt_dec(adap
);
411 mutex_unlock(&adap
->lock
);
415 if (fh
->mode_follower
== CEC_MODE_FOLLOWER
)
416 adap
->follower_cnt
--;
417 if (mode_follower
== CEC_MODE_FOLLOWER
)
418 adap
->follower_cnt
++;
419 if (mode_follower
== CEC_MODE_EXCL_FOLLOWER
||
420 mode_follower
== CEC_MODE_EXCL_FOLLOWER_PASSTHRU
) {
422 mode_follower
== CEC_MODE_EXCL_FOLLOWER_PASSTHRU
;
423 adap
->cec_follower
= fh
;
424 } else if (adap
->cec_follower
== fh
) {
425 adap
->passthrough
= false;
426 adap
->cec_follower
= NULL
;
428 if (mode_initiator
== CEC_MODE_EXCL_INITIATOR
)
429 adap
->cec_initiator
= fh
;
430 else if (adap
->cec_initiator
== fh
)
431 adap
->cec_initiator
= NULL
;
432 fh
->mode_initiator
= mode_initiator
;
433 fh
->mode_follower
= mode_follower
;
434 mutex_unlock(&adap
->lock
);
438 static long cec_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
440 struct cec_devnode
*devnode
= cec_devnode_data(filp
);
441 struct cec_fh
*fh
= filp
->private_data
;
442 struct cec_adapter
*adap
= fh
->adap
;
443 bool block
= !(filp
->f_flags
& O_NONBLOCK
);
444 void __user
*parg
= (void __user
*)arg
;
446 if (!devnode
->registered
)
450 case CEC_ADAP_G_CAPS
:
451 return cec_adap_g_caps(adap
, parg
);
453 case CEC_ADAP_G_PHYS_ADDR
:
454 return cec_adap_g_phys_addr(adap
, parg
);
456 case CEC_ADAP_S_PHYS_ADDR
:
457 return cec_adap_s_phys_addr(adap
, fh
, block
, parg
);
459 case CEC_ADAP_G_LOG_ADDRS
:
460 return cec_adap_g_log_addrs(adap
, parg
);
462 case CEC_ADAP_S_LOG_ADDRS
:
463 return cec_adap_s_log_addrs(adap
, fh
, block
, parg
);
466 return cec_transmit(adap
, fh
, block
, parg
);
469 return cec_receive(adap
, fh
, block
, parg
);
472 return cec_dqevent(adap
, fh
, block
, parg
);
475 return cec_g_mode(adap
, fh
, parg
);
478 return cec_s_mode(adap
, fh
, parg
);
485 static int cec_open(struct inode
*inode
, struct file
*filp
)
487 struct cec_devnode
*devnode
=
488 container_of(inode
->i_cdev
, struct cec_devnode
, cdev
);
489 struct cec_adapter
*adap
= to_cec_adapter(devnode
);
490 struct cec_fh
*fh
= kzalloc(sizeof(*fh
), GFP_KERNEL
);
492 * Initial events that are automatically sent when the cec device is
495 struct cec_event ev_state
= {
496 .event
= CEC_EVENT_STATE_CHANGE
,
497 .flags
= CEC_EVENT_FL_INITIAL_STATE
,
504 INIT_LIST_HEAD(&fh
->msgs
);
505 INIT_LIST_HEAD(&fh
->xfer_list
);
506 mutex_init(&fh
->lock
);
507 init_waitqueue_head(&fh
->wait
);
509 fh
->mode_initiator
= CEC_MODE_INITIATOR
;
512 err
= cec_get_device(devnode
);
518 filp
->private_data
= fh
;
520 mutex_lock(&devnode
->lock
);
521 /* Queue up initial state events */
522 ev_state
.state_change
.phys_addr
= adap
->phys_addr
;
523 ev_state
.state_change
.log_addr_mask
= adap
->log_addrs
.log_addr_mask
;
524 cec_queue_event_fh(fh
, &ev_state
, 0);
526 list_add(&fh
->list
, &devnode
->fhs
);
527 mutex_unlock(&devnode
->lock
);
532 /* Override for the release function */
533 static int cec_release(struct inode
*inode
, struct file
*filp
)
535 struct cec_devnode
*devnode
= cec_devnode_data(filp
);
536 struct cec_adapter
*adap
= to_cec_adapter(devnode
);
537 struct cec_fh
*fh
= filp
->private_data
;
539 mutex_lock(&adap
->lock
);
540 if (adap
->cec_initiator
== fh
)
541 adap
->cec_initiator
= NULL
;
542 if (adap
->cec_follower
== fh
) {
543 adap
->cec_follower
= NULL
;
544 adap
->passthrough
= false;
546 if (fh
->mode_follower
== CEC_MODE_FOLLOWER
)
547 adap
->follower_cnt
--;
548 if (fh
->mode_follower
== CEC_MODE_MONITOR_ALL
)
549 cec_monitor_all_cnt_dec(adap
);
550 mutex_unlock(&adap
->lock
);
552 mutex_lock(&devnode
->lock
);
554 mutex_unlock(&devnode
->lock
);
556 /* Unhook pending transmits from this filehandle. */
557 mutex_lock(&adap
->lock
);
558 while (!list_empty(&fh
->xfer_list
)) {
559 struct cec_data
*data
=
560 list_first_entry(&fh
->xfer_list
, struct cec_data
, xfer_list
);
562 data
->blocking
= false;
564 list_del(&data
->xfer_list
);
566 mutex_unlock(&adap
->lock
);
567 while (!list_empty(&fh
->msgs
)) {
568 struct cec_msg_entry
*entry
=
569 list_first_entry(&fh
->msgs
, struct cec_msg_entry
, list
);
571 list_del(&entry
->list
);
576 cec_put_device(devnode
);
577 filp
->private_data
= NULL
;
581 const struct file_operations cec_devnode_fops
= {
582 .owner
= THIS_MODULE
,
584 .unlocked_ioctl
= cec_ioctl
,
585 .release
= cec_release
,