modified: pixi.toml
[GalaxyCodeBases.git] / etc / Mac / getwwdc2013video.py
blob70940c9764f6d0b1fed63f808bb6a7d6eee6f438
1 #!/usr/bin/env python
2 # coding: utf-8
3 __author__ = 'Snake'
6 import urllib2, cookielib
7 from bs4 import BeautifulSoup as bs
9 base_url = 'https://developer.apple.com/wwdc/videos/'
10 base_header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0'}
#Scrape the video page and collect the chosen download links
14 def get_web(url, cookie, choose):
15 download = []
16 content = get_url_result(url, cookie)
17 soup = bs(str(content))
18 for div in soup.findAll('p', {'class': 'download'}):
19 if choose != 1 and choose != 3 and choose != 4:
20 hd = div('a')[0]['href']
21 download.append(hd)
22 if choose != 2 and choose != 3 and choose != 5:
23 sd = div('a')[1]['href']
24 download.append(sd)
25 try:
26 if choose != 1 and choose != 2:
27 pdf = div('a')[2]['href']
28 download.append(pdf)
29 except:
30 print "none pdf"
31 return download
#Fetch a page given its url and an optional cookie.
#If a cookie is supplied, the request is made with that cookie
#(authenticated); otherwise the url is fetched anonymously.
37 def get_url_result(url, cookie=None):
38 headers = base_header
39 headers['Referer'] = url
40 if cookie:
41 headers['Cookie'] = cookie
42 opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
43 urllib2.install_opener(opener)
44 req = urllib2.Request(url=url, headers=headers)
45 try:
46 content = urllib2.urlopen(req, timeout=20).read()
47 return content
48 except Exception as e:
49 print e.message + '\n'
53 def main():
54 cookie = raw_input("cookie: ")
55 print "please choose: 1-SD,2-HD,3-PDF,4-SD+PDF,5-HD+PDF,0-ALL"
56 choose = int(raw_input("choose: "))
57 download = get_web(base_url, cookie, choose)
58 for d in download:
59 print d
61 if __name__ == '__main__':
62 main()