// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
				if (raw->prev_ev.reset && ev.pulse == 0)
					dev_warn_once(&dev->dev, "timing event after reset should be pulse");
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(dev, ev);
			ir_lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);
		else
			schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		TO_US(ev->duration), TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
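
/*
 * Usage sketch (illustrative only, not part of rc-core): a driver whose
 * hardware reports complete pulse/space durations could feed each sample
 * to the decoders like this. The foo_* names are hypothetical; durations
 * are in nanoseconds in this version of rc-core.
 *
 *	struct ir_raw_event ev = {
 *		.pulse = true,
 *		.duration = 900 * NSEC_PER_USEC,	// a 900us pulse
 *	};
 *
 *	ir_raw_event_store(foo->rcdev, &ev);
 *	ir_raw_event_handle(foo->rcdev);	// wake the decoder thread
 */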

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t			now;
	struct ir_raw_event	ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
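
/*
 * Usage sketch (illustrative only): hardware that only interrupts on IR
 * level changes can report each edge as it happens. The foo_* names are
 * hypothetical.
 *
 *	static irqreturn_t foo_ir_isr(int irq, void *data)
 *	{
 *		struct foo_ir *foo = data;
 *		bool pulse = foo_rx_is_active(foo);	// a pulse is starting
 *
 *		ir_raw_event_store_edge(foo->rcdev, pulse);
 *		return IRQ_HANDLED;
 *	}
 */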

/**
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t		now;
	int		rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/* timer could be set to timeout (125ms by default) */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works
 * in similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffer
 * It automerges samples of same type, and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
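
/*
 * Usage sketch (illustrative only): a driver draining a small hardware
 * sample FIFO can pass each raw sample through the filter, which merges
 * consecutive samples of the same polarity and handles idle detection.
 * The foo_* helpers and FOO_* register layout are hypothetical.
 *
 *	while (foo_fifo_nonempty(foo)) {
 *		struct ir_raw_event ev = {};
 *		u32 sample = foo_fifo_pop(foo);
 *
 *		ev.pulse = sample & FOO_PULSE_BIT;
 *		ev.duration = (sample & FOO_DURATION_MASK) * NSEC_PER_USEC;
 *		ir_raw_event_store_with_filter(foo->rcdev, &ev);
 *	}
 *	ir_raw_event_handle(foo->rcdev);
 */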

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
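
/*
 * Usage sketch (illustrative only): a driver whose hardware raises an
 * explicit "receiver went quiet" interrupt can tell rc-core directly,
 * instead of relying on the duration-based check above. The foo_* status
 * helper is hypothetical.
 *
 *	if (foo_irq_was_rx_timeout(foo))
 *		ir_raw_event_set_idle(foo->rcdev, true);
 */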

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
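
/*
 * Usage sketch (illustrative only): drivers typically store a batch of
 * events from their interrupt handler and wake the decoding thread once,
 * rather than after every sample. All foo_* names are hypothetical.
 *
 *	for (i = 0; i < count; i++)
 *		ir_raw_event_store(foo->rcdev, &samples[i]);
 *	ir_raw_event_handle(foo->rcdev);
 */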

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_NS(10);

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
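
/*
 * Usage sketch (illustrative only): an encoder for an RC-5 style protocol
 * with an 889us half-bit clock might use this helper roughly as follows.
 * The timing values and foo_timings name are assumptions for illustration,
 * not taken from a real decoder module.
 *
 *	static const struct ir_raw_timings_manchester foo_timings = {
 *		.leader_pulse	= 889 * NSEC_PER_USEC,
 *		.clock		= 889 * NSEC_PER_USEC,
 *		.trailer_space	= 10 * 889 * NSEC_PER_USEC,
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_manchester(&e, max, &foo_timings, 14, command);
 *	if (!ret)
 *		n_events = e - events;	// events actually written
 */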

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
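
/*
 * Usage sketch (illustrative only): NEC-style pulse-distance timings, where
 * a long bit space encodes "1" and a short one encodes "0". The values below
 * are approximations for illustration, and foo_pd_timings is hypothetical.
 *
 *	static const struct ir_raw_timings_pd foo_pd_timings = {
 *		.header_pulse	= 9000 * NSEC_PER_USEC,
 *		.header_space	= 4500 * NSEC_PER_USEC,
 *		.bit_pulse	= 563 * NSEC_PER_USEC,
 *		.bit_space[0]	= 563 * NSEC_PER_USEC,
 *		.bit_space[1]	= 1688 * NSEC_PER_USEC,
 *		.trailer_pulse	= 563 * NSEC_PER_USEC,
 *		.trailer_space	= 563 * NSEC_PER_USEC,
 *		.msb_first	= 0,
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pd(&e, max, &foo_pd_timings, 32, raw_bits);
 */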

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
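
/*
 * Usage sketch (illustrative only): a transmitter can turn a scancode into
 * raw events and hand them to its TX path. The buffer size, scancode value
 * and foo_transmit() helper are assumptions for illustration.
 *
 *	struct ir_raw_event events[64];
 *	int count;
 *
 *	count = ir_raw_encode_scancode(RC_PROTO_RC5, 0x1e01, events,
 *				       ARRAY_SIZE(events));
 *	if (count >= 0)
 *		foo_transmit(foo, events, count);
 */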

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first of all, rather than calling ir_raw_event_handle() for each
 * edge and waking up the rc thread, 15 ms after the first edge
 * ir_raw_event_handle() is called. Secondly, it generates a timeout event
 * if no more IR is received after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_ns(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_ns(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + nsecs_to_jiffies(dev->timeout -
						     ktime_to_ns(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:		protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns:	The carrier in Hz
 *		-EINVAL if the protocol is invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
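
/*
 * Usage sketch (illustrative only): before transmitting an encoded
 * scancode, a driver can program its carrier generator. foo_set_carrier()
 * is a hypothetical hardware helper taking a frequency in Hz.
 *
 *	int carrier = ir_raw_encode_carrier(RC_PROTO_NEC);
 *	if (carrier > 0)
 *		foo_set_carrier(foo, carrier);
 */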

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
	 * ensure that the raw member is null on unlock; this is how
	 * "device gone" is checked.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
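
/*
 * Usage sketch (illustrative only): a protocol decoder module registers a
 * handler at init time and removes it on exit. The foo_* names, callbacks
 * and carrier value are hypothetical.
 *
 *	static struct ir_raw_handler foo_handler = {
 *		.protocols	= RC_PROTO_BIT_OTHER,
 *		.decode		= foo_decode,
 *		.encode		= foo_encode,
 *		.carrier	= 38000,
 *	};
 *
 *	static int __init foo_decoder_init(void)
 *	{
 *		return ir_raw_handler_register(&foo_handler);
 *	}
 *
 *	static void __exit foo_decoder_exit(void)
 *	{
 *		ir_raw_handler_unregister(&foo_handler);
 *	}
 */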

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);
		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);