# manga_py/providers/plus_comico_jp.py
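# Provider for the +Comico (plus.comico.jp) online store. Reading of the code
# below: chapters are listed via the getTitleArticles.nhn API, page images are
# fetched from the store's viewer endpoint, and each saved image is
# de-scrambled with the per-page tile order reported by the viewer.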
from time import time
from urllib import parse

from manga_py.crypt.puzzle import Puzzle
from manga_py.fs import rename
from manga_py.provider import Provider
from .helpers.std import Std
class PlusComicoJp(Provider, Std):
    # Per-page scramble strings collected in get_files(); after_file_save()
    # uses the entry for the matching page index to rebuild the image.
    scrambles = []
    def get_chapter_index(self) -> str:
        return self.re.search(r'/store/\d+/(\d+)', self.chapter).group(1)
    def get_content(self):
        content = self._storage.get('main_content', None)
        if content:
            return content
        idx = self.re.search(r'/store/(\d+)', self.get_url())
        url = '{}/store/{}/'.format(self.domain, idx.group(1))
        return self.http_get(url)
    def get_manga_name(self) -> str:
        return self.text_content_full(self.content, 'h1 > ._title')
    def get_chapters(self):
        idx = self.re.search(r'/store/(\d+)', self.get_url()).group(1)
        with self.http().post('{}/store/api/getTitleArticles.nhn'.format(
            self.domain
        ), data={
            'titleNo': idx
        }) as resp:
            json = resp.json()
        # Only freely readable articles ('freeFlg' == 'Y') are collected.
        items = []
        for i in json.get('result', {}).get('list', {}):
            for m in i.get('articleList'):
                if m.get('freeFlg') == 'Y':
                    items.append(m.get('articleDetailUrl'))
        return items
    def get_files(self):
        url = self.http().requests(self.chapter, method='head')
        location = url.headers.get('location')
        self.http().requests(location, method='head')

        location = parse.urlparse(location)
        params = parse.parse_qs(location.query)
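        # The chapter URL answers the HEAD request with a redirect to the
        # store's viewer; its 'param' query value is re-used below as an
        # access token for the image API. The second HEAD request presumably
        # primes the session for that viewer host.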
        ts = int(time())
        base_url = '{}://{}{}/diazepam_hybrid.php?param={}&ts={}&_={}&reqtype=0'.format(
            location.scheme,
            location.netloc,
            self.re.search(r'(.+)/\w+\.php', location.path).group(1),
            parse.quote_plus(params.get('param')[0]),
            ts,
            ts + 1305,
        )
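        # Three viewer endpoints are built from the same base URL:
        #   mode=7 (face.xml)      - chapter metadata, including <TotalPage>;
        #   mode=8 (NNNN.xml)      - per-page <Scramble> tile order;
        #   mode=1 (NNNN_0000.bin) - the scrambled page image itself.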
        pages_url = base_url + '&mode=7&file=face.xml&callback=jQ12_34'
        scramble_url = base_url + '&mode=8&file={:0>4}.xml'
        file_url = base_url + '&mode=1&file={:0>4}_0000.bin'
        total_pages = self.re.search(r'TotalPage>(\d+)</TotalPage', self.http_get(pages_url))
        if total_pages:
            total_pages = int(total_pages.group(1))
        else:
            total_pages = 0
        items = []
        self.scrambles = []
        for i in range(total_pages):
            c = self.re.search(r'Scramble>(.+?)</Scramble', self.http_get(scramble_url.format(i)))
            self.scrambles.append(c.group(1))
            items.append(file_url.format(i))
        return items
    def get_cover(self) -> str:
        return self._cover_from_content('.cover img')
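    # The viewer serves each page as a 4x4 grid of shuffled tiles. The
    # <Scramble> string ("a,b,c,...") lists tile indices; the mapping built
    # below is handed to Puzzle to restore the original layout (the final
    # constructor argument is forwarded to Puzzle as-is; its meaning is not
    # documented in this module).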
    def after_file_save(self, _path: str, idx: int):
        _matrix = self.scrambles[idx].split(',')
        div_num = 4
        matrix = {}
        for n, i in enumerate(_matrix):
            matrix[int(i)] = n
        p = Puzzle(div_num, div_num, matrix, 8)
        p.need_copy_orig = True
        p.de_scramble(_path, '{}.jpg'.format(_path))
        rename('{}.jpg'.format(_path), _path)
        return _path, None
    def save_file(self, idx=None, callback=None, url=None, in_arc_name=None):
        if in_arc_name is None:
            in_arc_name = '{}_image.jpg'.format(idx)
        super().save_file(idx, callback, url, in_arc_name)
main = PlusComicoJp
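# 'main' appears to be the hook the manga-py provider loader looks up in each
# provider module; the alias follows the same convention as the other providers.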