/*
 * driver for Earthsoft PT1/PT2
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 *	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/ratelimit.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"
#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511
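
/*
 * DMA ring layout, as implied by the structures below: the board fills
 * 4 KiB "buffer" pages holding 1024 32-bit units ("upackets").  A "table"
 * page lists the page-frame numbers of 511 such buffers plus the PFN of
 * the next table, so the tables form a circular list that the hardware
 * can walk on its own.
 */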
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};
#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;
	int table_index;
	int buf_index;

	struct mutex lock;
	int power;
	int reset;
};
struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;
	int st_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				enum fe_sec_voltage voltage);
	int (*orig_sleep)(struct dvb_frontend *fe);
	int (*orig_init)(struct dvb_frontend *fe);

	enum fe_sec_voltage voltage;
	int sleep;
};
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

static int pt1_nr_tables = 8;
module_param_named(nr_tables, pt1_nr_tables, int, 0);
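
/*
 * nr_tables determines how much DMA memory is set aside: each table
 * references PT1_NR_BUFS (511) buffers of PT1_PAGE_SIZE (4 KiB), so the
 * default of 8 tables amounts to roughly 16 MiB of buffer space.
 */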
static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}
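
/*
 * The helpers below poll and poke register 0, which appears to act as a
 * combined command/status register: pt1_sync() and pt1_identify() shift a
 * bit-serial value out of bit 30, while the unlock/reset/RAM routines
 * issue commands and wait for the corresponding status bits to change.
 */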
static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	dev_err(&pt1->pdev->dev, "could not sync\n");
	return -EIO;
}

static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset RAM\n");
	return -EIO;
}
static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not enable RAM\n");
	return -EIO;
}

static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	int phase;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	phase = pt1->pdev->device == 0x211a ? 128 : 166;
	for (i = 0; i < phase; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}

static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}
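
/*
 * Each 32-bit upacket carries a stream tag and up to three payload bytes:
 * bits 31:29 hold the stream index + 1, bits 28:26 a 3-bit sequence
 * counter, bit 25 flags the start of a TS packet and bit 24 a device-side
 * buffer overflow; bits 23:0 are payload.  63 upackets rebuild one
 * 188-byte TS packet (62 * 3 + 2 bytes), and after 21 packets the
 * page-sized per-adapter buffer is handed to the DVB demux.
 */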
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;
	int sc;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		if (upacket >> 24 & 1)
			printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
					   pt1->table_index, pt1->buf_index);
		sc = upacket >> 26 & 0x7;
		if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
			printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
					   index);
		adap->st_count = sc;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}
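
/*
 * The polling kthread walks the table/buffer ring in software: it filters
 * the current buffer page and, when nothing new has arrived, sleeps for
 * about a millisecond before trying again.
 */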
static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++pt1->buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			pt1->buf_index = 0;
			if (++pt1->table_index >= pt1_nr_tables)
				pt1->table_index = 0;
		}
	}

	return 0;
}
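
/*
 * Page allocation and ring construction.  Buffer and table pages are
 * obtained with dma_alloc_coherent() and referenced by their page frame
 * number; pt1_init_tables() links the table pages into a circle via
 * next_pfn and then hands the first PFN to the hardware.
 */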
static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}

static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}
static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}
static int pt1_start_polling(struct pt1 *pt1)
{
	int ret = 0;

	mutex_lock(&pt1->lock);
	if (!pt1->kthread) {
		pt1->kthread = kthread_run(pt1_thread, pt1, "earth-pt1");
		if (IS_ERR(pt1->kthread)) {
			ret = PTR_ERR(pt1->kthread);
			pt1->kthread = NULL;
		}
	}
	mutex_unlock(&pt1->lock);
	return ret;
}

static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++) {
		int ret;

		ret = pt1_start_polling(adap->pt1);
		if (ret)
			return ret;
		pt1_set_stream(adap->pt1, adap->index, 1);
	}
	return 0;
}

static void pt1_stop_polling(struct pt1 *pt1)
{
	int i, count;

	mutex_lock(&pt1->lock);
	for (i = 0, count = 0; i < PT1_NR_ADAPS; i++)
		count += pt1->adaps[i]->users;

	if (count == 0 && pt1->kthread) {
		kthread_stop(pt1->kthread);
		pt1->kthread = NULL;
	}
	mutex_unlock(&pt1->lock);
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users) {
		pt1_set_stream(adap->pt1, adap->index, 0);
		pt1_stop_polling(adap->pt1);
	}
	return 0;
}
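
/*
 * Register 1 seems to combine power management and LNB control: bit 0 is
 * the global power flag, bit 3 releases reset, bits 1-2 select the LNB
 * voltage and the per-adapter sleep_bits keep the individual tuners
 * powered.  The whole word is rewritten whenever any of these inputs
 * changes.
 */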
static void
pt1_update_power(struct pt1 *pt1)
{
	int bits;
	int i;
	struct pt1_adapter *adap;
	static const int sleep_bits[] = {
		1 << 4,
		1 << 6 | 1 << 7,
		1 << 5,
		1 << 6 | 1 << 8,
	};

	bits = pt1->power | !pt1->reset << 3;
	mutex_lock(&pt1->lock);
	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1->adaps[i];
		switch (adap->voltage) {
		case SEC_VOLTAGE_13: /* actually 11V */
			bits |= 1 << 1;
			break;
		case SEC_VOLTAGE_18: /* actually 15V */
			bits |= 1 << 1 | 1 << 2;
			break;
		default:
			break;
		}

		/* XXX: The bits should be changed depending on adap->sleep. */
		bits |= sleep_bits[i];
	}
	pt1_write_reg(pt1, 1, bits);
	mutex_unlock(&pt1->lock);
}
static int pt1_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->voltage = voltage;
	pt1_update_power(adap->pt1);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}

static int pt1_sleep(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 1;
	pt1_update_power(adap->pt1);

	if (adap->orig_sleep)
		return adap->orig_sleep(fe);
	else
		return 0;
}

static int pt1_wakeup(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 0;
	pt1_update_power(adap->pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	if (adap->orig_init)
		return adap->orig_init(fe);
	else
		return 0;
}

static void pt1_free_adapter(struct pt1_adapter *adap)
{
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->voltage = SEC_VOLTAGE_OFF;
	adap->sleep = 1;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;
	adap->st_count = -1;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	return adap;

err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}
static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}

static int pt1_init_adapters(struct pt1 *pt1)
{
	int i;
	struct pt1_adapter *adap;
	int ret;

	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1_alloc_adapter(pt1);
		if (IS_ERR(adap)) {
			ret = PTR_ERR(adap);
			goto err;
		}

		adap->index = i;
		pt1->adaps[i] = adap;
	}
	return 0;

err:
	while (i--)
		pt1_free_adapter(pt1->adaps[i]);

	return ret;
}

static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
}

static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
	int ret;

	adap->orig_set_voltage = fe->ops.set_voltage;
	adap->orig_sleep = fe->ops.sleep;
	adap->orig_init = fe->ops.init;
	fe->ops.set_voltage = pt1_set_voltage;
	fe->ops.sleep = pt1_sleep;
	fe->ops.init = pt1_wakeup;

	ret = dvb_register_frontend(&adap->adap, fe);
	if (ret < 0)
		return ret;

	adap->fe = fe;
	return 0;
}

static void pt1_cleanup_frontends(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_cleanup_frontend(pt1->adaps[i]);
}
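
/*
 * Each board carries two ISDB-S/ISDB-T demodulator pairs, giving the four
 * DVB adapters above.  The two config tables differ only in the reference
 * frequency; PCI device 0x211a (presumably the PT1) uses the 20 MHz
 * variants and 0x222a (presumably the PT2) the 25 MHz ones.
 */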
struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	},
};

static const struct pt1_config pt2_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	},
};
static int pt1_init_frontends(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *configs, *config;
	struct dvb_frontend *fe[4];
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
	do {
		config = &configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;
	} while (i < 4);

	do {
		ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
		if (ret < 0)
			goto err;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		dvb_unregister_frontend(fe[j]);

	return ret;
}
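
/*
 * I2C access goes through what looks like a small sequencer in the FPGA:
 * each pt1_i2c_emit() call stores one instruction word via register 4,
 * encoding an instruction address, busy/read-enable flags, the SCL/SDA
 * levels to drive and the address of the next instruction.  The bit- and
 * byte-level helpers below build complete transfers out of these words.
 */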
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}

static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}

static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}

static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr, 0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}
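
/*
 * The xfer implementation only supports plain writes and write-then-read
 * pairs with reads of at most four bytes; the read data is fetched from
 * register 2 after the sequence has completed.
 */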
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}

static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};
static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}

static void pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	if (pt1->kthread)
		kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_frontends(pt1);
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
	pt1_cleanup_adapters(pt1);
	i2c_del_adapter(&pt1->i2c_adap);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
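
/*
 * Probe order: map the BAR, create the DVB adapters and the I2C adapter,
 * run the register-0 sync/unlock/reset handshakes, enable the on-board
 * RAM, power the tuners up, attach the frontends and finally build and
 * register the DMA tables so streaming can start.
 */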
static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	mutex_init(&pt1->lock);
	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_kfree;

	mutex_init(&pt1->lock);

	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	strcpy(i2c_adap->name, DRIVER_NAME);
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1->power = 1;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1->reset = 0;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_frontends(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_frontends;

	return 0;

err_pt1_cleanup_frontends:
	pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_kfree:
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

static const struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ PCI_DEVICE(0x10ee, 0x222a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= pt1_remove,
	.id_table	= pt1_id_table,
};

module_pci_driver(pt1_driver);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");