Skip to content

Commit

Permalink
Merge branch 'qq' into develop
Browse files Browse the repository at this point in the history
  • Loading branch information
rosynirvana committed Jul 10, 2017
2 parents 6d96300 + cbe4fe0 commit dd0c850
Showing 1 changed file with 34 additions and 67 deletions.
101 changes: 34 additions & 67 deletions src/you_get/extractors/qq.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,75 +3,42 @@
__all__ = ['qq_download']

from ..common import *
from ..util.log import *
from .qie import download as qieDownload
from urllib.parse import urlparse,parse_qs

def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
    """Download a Tencent Video (v.qq.com) item by its vid.

    Queries the getinfo API for stream metadata, then fetches a play key
    for every segment and downloads/merges them.  The `title` argument is
    overridden by the title reported by the API.
    """
    # The API wraps its JSON payload as `QZOutputJson=<json>;` — strip the
    # prefix with match1 and the trailing ';' with [:-1] before parsing.
    info_api = 'http://vv.video.qq.com/getinfo?otype=json&appver=3.2.19.333&platform=11&defnpayver=1&vid={}'.format(vid)
    raw_info = get_content(info_api)
    video_json = json.loads(match1(raw_info, r'QZOutputJson=(.*)')[:-1])

    # Non-zero `exem` means the API refused the request (region lock, paid
    # content, …) — abort with the server-supplied message.
    if video_json['exem'] != 0:
        log.wtf(video_json['msg'])

    first_vi = video_json['vl']['vi'][0]
    fn_pre = first_vi['lnk']                      # filename stem for segments
    title = first_vi['ti']                        # authoritative title from API
    host = first_vi['ul']['ui'][0]['url']         # CDN base URL (ends with '/')
    streams = video_json['fl']['fi']              # available formats, worst→best
    seg_cnt = len(first_vi['cl']['ci'])           # number of clip segments

    # The last entry in `fl.fi` is the highest quality offered.
    best_quality = streams[-1]['name']
    part_format_id = streams[-1]['id']

    segment_urls = []
    size_total = 0
    for seg_idx in range(1, seg_cnt + 1):
        filename = fn_pre + '.p' + str(part_format_id % 10000) + '.' + str(seg_idx) + '.mp4'
        key_api = "http://vv.video.qq.com/getkey?otype=json&platform=11&format={}&vid={}&filename={}&appver=3.2.19.333".format(part_format_id, vid, filename)
        key_payload = get_content(key_api)
        key_json = json.loads(match1(key_payload, r'QZOutputJson=(.*)')[:-1])
        vkey = key_json['key']
        seg_url = '{}{}?vkey={}'.format(host, filename, vkey)
        segment_urls.append(seg_url)
        _, ext, seg_size = url_info(seg_url)
        size_total += seg_size

    print_info(site_info, title, ext, size_total)
    if not info_only:
        download_urls(segment_urls, title, ext, size_total, output_dir=output_dir, merge=merge)

def kg_qq_download_by_shareid(shareid, output_dir='.', info_only=False, caption=False):
BASE_URL = 'http://cgi.kg.qq.com/fcgi-bin/kg_ugc_getdetail'
Expand Down Expand Up @@ -124,7 +91,7 @@ def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
return

if 'mp.weixin.qq.com/s?' in url:
content = get_html(url)
content = get_content(url)
vids = matchall(content, [r'\bvid=(\w+)'])
for vid in vids:
qq_download_by_vid(vid, vid, output_dir, merge, info_only)
Expand All @@ -143,7 +110,7 @@ def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
url = new_url

if 'kuaibao.qq.com' in url or re.match(r'http://daxue.qq.com/content/content/id/\d+', url):
content = get_html(url)
content = get_content(url)
vid = match1(content, r'vid\s*=\s*"\s*([^"]+)"')
title = match1(content, r'title">([^"]+)</p>')
title = title.strip() if title else vid
Expand All @@ -152,7 +119,7 @@ def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
# for embedded URLs; don't know what the title is
title = vid
else:
content = get_html(url)
content = get_content(url)
vid = parse_qs(urlparse(url).query).get('vid') #for links specified vid like http://v.qq.com/cover/p/ps6mnfqyrfo7es3.html?vid=q0181hpdvo5
vid = vid[0] if vid else match1(content, r'vid"*\s*:\s*"\s*([^"]+)"') #general fallback
title = match1(content,r'<a.*?id\s*=\s*"%s".*?title\s*=\s*"(.+?)".*?>'%vid)
Expand Down

0 comments on commit dd0c850

Please sign in to comment.