Skip to content

Commit

Permalink
lint
Browse files Browse the repository at this point in the history
  • Loading branch information
myh1000 committed Jan 17, 2017
1 parent 8575925 commit 9fbe765
Showing 1 changed file with 96 additions and 36 deletions.
132 changes: 96 additions & 36 deletions src/you_get/extractors/kissanime.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,34 +10,49 @@
scraper = cfscrape.create_scraper()
spinner = itertools.cycle(['-', '/', '|', '\\'])


def toggle(num):
    """Spin an ASCII progress indicator on stdout.

    Loops for as long as ``num > 0``; ``num`` is never decremented, so any
    positive value spins forever.  The caller launches this in a separate
    ``multiprocessing.Process`` and terminates it when the wrapped work
    (a blocking HTTP fetch) completes.

    Note: the diff residue duplicating the trailing backspace/flush lines
    has been collapsed into a single draw/erase cycle per iteration.
    """
    while num > 0:
        sys.stdout.write(next(spinner))  # draw the next spinner frame
        sys.stdout.flush()               # force the character to appear now
        sys.stdout.write('\b')           # back up so the next frame overwrites it
        sys.stdout.flush()


def get_title(html):
    """Extract a cleaned episode/series title from raw page HTML.

    Parameters:
        html (bytes): full response body of a kissanime page.

    Returns:
        str: the text between ``<title>`` tags, truncated at the site-name
        separator (the last ``-``), with tabs/commas/CR/LF stripped from the
        ends and all internal whitespace collapsed to single spaces.

    Raises:
        AttributeError: if the page has no ``<title>`` tag or the expected
        `` ...-`` pattern is absent (``re.search`` returns None).

    Fix: the diff residue assigned ``title`` twice (old one-line form plus
    the reformatted form); collapsed to a single assignment.
    """
    btitle = re.search(b'<title>(.*)</title>', html, flags=re.DOTALL).group(1)
    # Greedy '.*-' cuts at the LAST dash, dropping the trailing site name.
    raw = re.search(' (.*)-', btitle.decode('UTF-8'),
                    flags=re.DOTALL).group(1)
    # strip('\t, \r, \n') strips any of tab/comma/space/CR/LF at the ends;
    # the join/split pair collapses runs of internal whitespace.
    title = ' '.join(' '.join(raw.strip('\t, \r, \n').splitlines()).split())
    return title

def kissanime_download(url, s=False, output_dir='.', merge=True, info_only=False, **kwargs):

def kissanime_download(url,
s=False,
output_dir='.',
merge=True,
info_only=False,
**kwargs):
if ' ' in url:
search = url.split(' ',1)[1]
search = url.split(' ', 1)[1]
url = ("https://kissanime.to/Search/Anime/" + search)
search = True
else:
search = False
p = Process(target = toggle, args=(1,))
p = Process(target=toggle, args=(1, ))
p.start()
html = scraper.get(url).content
p.terminate()

sys.stdout.write('\b')
if search:
kissanime_download_search(html, output_dir, merge, info_only, **kwargs)
elif "id=" not in url.lower():
kissanime_download_playlist(html, s, output_dir, merge, info_only, **kwargs)
kissanime_download_playlist(html, s, output_dir, merge, info_only, **
kwargs)
else:
url = kissanime_url(html)
title = get_title(html)
Expand All @@ -47,7 +62,7 @@ def kissanime_download(url, s=False, output_dir='.', merge=True, info_only=False
if stream_id > len(url) or int(stream_id) <= 0:
log.e('[Error] Invalid format id.')
exit(2)
url = url[len(url)-stream_id]
url = url[len(url) - stream_id]
type, ext, size = url_info(url, faker=True)
print_info(site_info, title, type, size)

Expand All @@ -56,7 +71,8 @@ def kissanime_download(url, s=False, output_dir='.', merge=True, info_only=False
for idx, link in enumerate(url):
if idx == 0:
print('[ DEFAULT ] __________________________________')
print('dl-with: you-get --format='+str(len(url)-idx)+' [url]')
print('dl-with: you-get --format=' + str(
len(url) - idx) + ' [url]')
type, ext, size = url_info(link, faker=True)
print_info(site_info, title, type, size)
else:
Expand All @@ -68,8 +84,18 @@ def kissanime_download(url, s=False, output_dir='.', merge=True, info_only=False
if not info_only:
download_urls([url], title, ext, size, output_dir, merge=merge)

def kissanime_download_playlist(html, search=False, output_dir='.', merge=True, info_only=False, **kwargs):
playlist = re.sub( '\s+', ' ', (re.search(b'<table class="listing">(.*)</table>', html, flags=re.DOTALL).group(1).decode('UTF-8'))).strip()

def kissanime_download_playlist(html,
search=False,
output_dir='.',
merge=True,
info_only=False,
**kwargs):
playlist = re.sub('\s+',
' ', (re.search(
b'<table class="listing">(.*)</table>',
html,
flags=re.DOTALL).group(1).decode('UTF-8'))).strip()
links = re.findall(u'<a href="([^."]+)', playlist)
if 'stream_id' in kwargs and kwargs['stream_id']:
# Extract the stream
Expand All @@ -78,7 +104,7 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,
if stream_id > len(links) or int(stream_id) <= 0:
log.e('[Error] Invalid format id.')
exit(2)
url = 'https://kissanime.to' + links[len(links)-stream_id]
url = 'https://kissanime.to' + links[len(links) - stream_id]
html = scraper.get(url).content
url = kissanime_url(html)[0]
title = get_title(html)
Expand All @@ -89,11 +115,14 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,
download_urls([url], title, ext, size, output_dir, merge)
else:
stream_id_range = kwargs['stream_id'].partition('-')
if int(stream_id_range[2]) > len(links) or int(stream_id_range[0]) <= 0 or int(stream_id_range[0]) > int(stream_id_range[2]):
if int(stream_id_range[2]) > len(links) or int(stream_id_range[
0]) <= 0 or int(stream_id_range[0]) > int(stream_id_range[
2]):
log.e('[Error] Invalid format id range.')
exit(2)
for x in range(int(stream_id_range[2]), int(stream_id_range[0])-1, -1):
url = 'https://kissanime.to' + links[len(links)-x]
for x in range(
int(stream_id_range[2]), int(stream_id_range[0]) - 1, -1):
url = 'https://kissanime.to' + links[len(links) - x]
html = scraper.get(url).content
url = kissanime_url(html)[0]
title = get_title(html)
Expand All @@ -102,10 +131,14 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,

if not info_only:
if x == int(stream_id_range[2]):
print("The download bar may look weird, but its fine. The console is just trying to update the same progress bar for multiple downloads.")
print(
"The download bar may look weird, but its fine. The console is just trying to update the same progress bar for multiple downloads."
)
elif x == int(stream_id_range[0]):
print("The download bar may look weird, but its fine.")
p = Process(target = download_urls, args=([url], title, ext, size, output_dir, merge))
p = Process(
target=download_urls,
args=([url], title, ext, size, output_dir, merge))
p.start()
elif search:
url_list = []
Expand All @@ -114,8 +147,10 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,
url = 'https://kissanime.to' + link
url_list.append(url)
html = scraper.get(url).content
print('[ '+ str(len(links)-idx) +' ] __________________________________')
print('dl-with: you-get --format='+str(len(links)-idx)+' [url]')
print('[ ' + str(len(links) - idx) +
' ] __________________________________')
print('dl-with: you-get --format=' + str(len(links) - idx) +
' [url]')
url = kissanime_url(html)[0]
title = get_title(html)
type, ext, size = url_info(url, faker=True)
Expand All @@ -126,7 +161,7 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,
if stream_id > len(links) or int(stream_id) <= 0:
log.e('[Error] Invalid format id.')
exit(2)
url = 'https://kissanime.to' + links[len(links)-stream_id]
url = 'https://kissanime.to' + links[len(links) - stream_id]
html = scraper.get(url).content
url = kissanime_url(html)[0]
title = get_title(html)
Expand All @@ -137,11 +172,14 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,
download_urls([url], title, ext, size, output_dir, merge)
else:
stream_id_range = input_var.partition('-')
if int(stream_id_range[2]) > len(links) or int(stream_id_range[0]) <= 0 or int(stream_id_range[0]) > int(stream_id_range[2]):
if int(stream_id_range[2]) > len(links) or int(stream_id_range[
0]) <= 0 or int(stream_id_range[0]) > int(stream_id_range[
2]):
log.e('[Error] Invalid format id range.')
exit(2)
for x in range(int(stream_id_range[2]), int(stream_id_range[0])-1, -1):
url = 'https://kissanime.to' + links[len(links)-x]
for x in range(
int(stream_id_range[2]), int(stream_id_range[0]) - 1, -1):
url = 'https://kissanime.to' + links[len(links) - x]
html = scraper.get(url).content
url = kissanime_url(html)[0]
title = get_title(html)
Expand All @@ -150,31 +188,45 @@ def kissanime_download_playlist(html, search=False, output_dir='.', merge=True,

if not info_only:
if x == int(stream_id_range[2]):
print("The download bar may look weird, but its fine. The console is just trying to update the same progress bar for multiple downloads.")
print(
"The download bar may look weird, but its fine. The console is just trying to update the same progress bar for multiple downloads."
)
elif x == int(stream_id_range[0]):
print("The download bar may look weird, but its fine.")
p = Process(target = download_urls, args=([url], title, ext, size, output_dir, merge))
p = Process(
target=download_urls,
args=([url], title, ext, size, output_dir, merge))
p.start()
else:
for idx, link in enumerate(links):
url = 'https://kissanime.to' + link
html = scraper.get(url).content
print('dl-with: you-get --format='+str(len(links)-idx)+' [url]')
print('dl-with: you-get --format=' + str(len(links) - idx) +
' [url]')
url = kissanime_url(html)[0]
title = get_title(html)
type, ext, size = url_info(url, faker=True)
print_info(site_info, title, type, size)

def kissanime_download_search(html, output_dir='.', merge=True, info_only=False, **kwargs):
playlist = re.sub( '\s+', ' ', (re.search(b'<table class="listing">(.*)</table>', html, flags=re.DOTALL).group(1).decode('UTF-8'))).strip()

def kissanime_download_search(html,
output_dir='.',
merge=True,
info_only=False,
**kwargs):
playlist = re.sub('\s+',
' ', (re.search(
b'<table class="listing">(.*)</table>',
html,
flags=re.DOTALL).group(1).decode('UTF-8'))).strip()
links = re.findall(u'<a href="([^."]+)', playlist)
url_list = []
if 'id=' not in links[0].lower():
for idx, link in enumerate(links):
if (idx < 20):
url = 'https://kissanime.to' + link
url_list.append(url)
print(str(idx+1) + '. ' + url)
print(str(idx + 1) + '. ' + url)
else:
print('20+ items not listed.')
break
Expand All @@ -184,25 +236,33 @@ def kissanime_download_search(html, output_dir='.', merge=True, info_only=False,
if stream_id > len(url_list) or int(stream_id) <= 0:
log.e('[Error] Invalid format id.')
exit(2)
kissanime_download(url_list[int(input_var)-1], True, output_dir, merge, info_only, **kwargs)
kissanime_download(url_list[int(input_var) - 1], True, output_dir,
merge, info_only, **kwargs)
else:
stream_id_range = input_var.partition('-')
if int(stream_id_range[2]) > len(url_list) or int(stream_id_range[0]) <= 0 or int(stream_id_range[0]) > int(stream_id_range[2]):
if int(stream_id_range[2]) > len(url_list) or int(stream_id_range[
0]) <= 0 or int(stream_id_range[0]) > int(stream_id_range[
2]):
log.e('[Error] Invalid format id range.')
exit(2)
for x in range(int(stream_id_range[2]), int(stream_id_range[0])-1, -1):
kissanime_download(url_list[int(x)-1], True, output_dir, merge, info_only, **kwargs)
for x in range(
int(stream_id_range[2]), int(stream_id_range[0]) - 1, -1):
kissanime_download(url_list[int(x) - 1], True, output_dir,
merge, info_only, **kwargs)
else:
url = 'https://kissanime.to/'+ re.search(r'([^/].+)/Episode-', links[0]).group(1)
url = 'https://kissanime.to/' + re.search(r'([^/].+)/Episode-',
links[0]).group(1)
kissanime_download(url, True, output_dir, merge, info_only, **kwargs)


def kissanime_url(html):
    """Decode the direct video URLs from a kissanime episode page.

    Parameters:
        html (bytes): full response body of an episode page.

    Returns:
        list[str] | None: base64-decoded URLs from the quality ``<select>``
        dropdown, highest quality first (page order); implicitly ``None``
        when the dropdown holds no ``<option>`` entries — callers index the
        result, so an empty dropdown surfaces as a TypeError upstream.

    Raises:
        AttributeError: if the page lacks ``<select id="selectQuality">``.

    Fix: the diff residue assigned ``selectQuality`` twice (old one-line
    form plus the wrapped form); collapsed to a single assignment.
    """
    selectQuality = re.search(b'<select id="selectQuality">.*</select>',
                              html).group(0)
    # Each option's value attribute is a base64-encoded URL.
    options = re.findall(b'<option value="([^"]+)', selectQuality)
    if options:
        url = ""
        for val in options:
            url += "\n" + base64.b64decode(val).decode("UTF-8")
        # Drop blank lines (the leading "\n") and surrounding whitespace.
        url_list = [y for y in (x.strip() for x in url.splitlines()) if y]
        return url_list

Expand Down

0 comments on commit 9fbe765

Please sign in to comment.