diff --git a/.gitignore b/.gitignore
index b4ba2e95..812513d2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,10 @@
*.pyc
*.pyo
+__pycache__
*.cookies
*.DS_Store
temp/*
-script.module.resolveurl/resources/settings.xml
+settings.xml
doc/build
doc/resources
.project
diff --git a/README.md b/README.md
index a0d59602..84888cfd 100644
--- a/README.md
+++ b/README.md
@@ -1,28 +1,45 @@
# script.module.resolveurl
+I am in no way responsible for the urls being resolved by 3rd parties. This script only resolves video content from legitimate file lockers without prejudice. If this script is being used by 3rd parties to resolve content that you feel infringes upon your Intellectual Property then please take your complaints to the actual website or developer linking to such content and not me. This script in no way searches for any content whatsoever.
+
Include the script in your addon.xml
-
-
-
-
+```xml
+<requires>
+    <import addon="script.module.resolveurl" version="5.1.27"/>
+</requires>
+```
Import ResolveUrl and use it the same as you would with the UrlResolver
- import resolveurl
- resolved = resolveurl.resolve(url)
+```python
+import resolveurl
+resolved = resolveurl.resolve(url)
+```
Or you can import ResolveUrl as UrlResolver to your existing addon that uses the UrlResolver
- import resolveurl as urlresolver
- resolved = urlresolver.resolve(url)
-
-Include my repo with your repo to always have the latest updates from me
-
-
- https://raw.githubusercontent.com/jsergio123/zips/master/addons.xml
- https://raw.githubusercontent.com/jsergio123/zips/master/addons.xml.md5
- https://raw.githubusercontent.com/jsergio123/zips/master/
-
-
-I am in no way responsible for the urls being resolved by 3rd parties. This script only resolves video content from legitimate file lockers without prejudice. If this script is being used by 3rd parties to resolve content that you feel infringes upon your Intellectual Property then please take your complaints to the actual website or developer linking to such content and not me. This script in no way searches for any content whatsoever.
+```python
+import resolveurl as urlresolver
+resolved = urlresolver.resolve(url)
+```
+
+Include my repo with your repo to always have the ~~latest~~ updates from me
+
+```xml
+<dir>
+    <info compressed="false">https://raw.githubusercontent.com/jsergio123/zips/master/addons.xml</info>
+    <checksum>https://raw.githubusercontent.com/jsergio123/zips/master/addons.xml.md5</checksum>
+    <datadir zip="true">https://raw.githubusercontent.com/jsergio123/zips/master/</datadir>
+</dir>
+```
+
+Include the gujal dir with your repo to always have the **latest** updates
+
+```xml
+<dir>
+    <info compressed="false">https://raw.githubusercontent.com/Gujal00/smrzips/master/addons.xml</info>
+    <checksum>https://raw.githubusercontent.com/Gujal00/smrzips/master/addons.xml.md5</checksum>
+    <datadir zip="true">https://raw.githubusercontent.com/Gujal00/smrzips/master/zips/</datadir>
+</dir>
+```
diff --git a/addon.xml b/addon.xml
index 63086cfd..cfb00af1 100644
--- a/addon.xml
+++ b/addon.xml
@@ -1,21 +1,29 @@
-
+
-
+
+
+
-
-
+ all
- Resolve common video host URL's to be playable in XBMC/Kodi.
- Resolver enlaces a videos de sitios populares para poder reproducirlos en XBMC/Kodi.
- Prilagodi najčešće URL-ove video izvora kako bi radili u XBMC/Kodiju.
- Επίλυση συνδέσμων κοινών εξυπηρετητών σε μορφή βίντεο ικανό για αναπαραγωγή στο XBMC/Kodi.
- Resolve common video host URL's to be playable in XBMC/Kodi, simplify addon development of video plugins requiring multi video hosts.
- Prilagodi najčešće URL-ove video izvora kako bi radili u XBMC/Kodiju, pojednostavnjuje razvoj priključaka za video dodatke koji zahtjevaju višestruke video izvore.
+ Resolve common video host URL's to be playable in XBMC/Kodi.
+ Resolver enlaces a videos de sitios populares para poder reproducirlos en XBMC/Kodi.
+ Prilagodi najčešće URL-ove video izvora kako bi radili u XBMC/Kodiju.
+ Επίλυση συνδέσμων κοινών εξυπηρετητών σε μορφή βίντεο ικανό για αναπαραγωγή στο XBMC/Kodi.
+ Resolve common video host URL's to be playable in XBMC/Kodi, simplify addon development of video plugins requiring multi video hosts.
+ Επίλυση συνδέσμων κοινών εξυπηρετητών σε μορφή βίντεο ικανό για αναπαραγωγή στο XBMC/Kodi, απλοποίηση της ανάπτυξης των προσθέτων που χρησιμοποιούν πολλαπλούς εξυπηρετητές για υλικό βίντεο
+ Prilagodi najčešće URL-ove video izvora kako bi radili u XBMC/Kodiju, pojednostavnjuje razvoj priključaka za video dodatke koji zahtjevaju višestruke video izvore.
+ This script only resolves video content from legitimate file lockers without prejudice. If this script is being used by 3rd parties to resolve content that you feel infringes upon your Intellectual Property then please take your complaints to the actual website or developer linking to such content and not here. This script in no way searches for any content whatsoever.
+ Το ανωτέρο σενάριο το μόνο που κάνει είναι να επιλύει οπτικοακουστικό περιεχόμενο από νόμιμες πηγές χωρίς προκατάληψη. Εάν το ανωτέρο σενάριο χρησιμοποιηθεί από τρίτους ώστε να επιλύσει περιεχόμενο το οποίο σας προκαλεί αντίδραση ότι παραβιάζει την πνευματική σας ιδιοκτησία παρακαλώ μεταφέρετε τα παραπονά σας στον ιστότοπο τον οποίο βρίσκεται το εν λόγω περιεχόμενο η τον δημιουργό ο οποίος πραγματοποιεί την σύνδεση και όχι εδω. Το σενάριο αυτό ουδεμία περίπτωση πραγματοποιεί αναζήτηση περιεχομένου.
+
+ icon.png
+ fanart.jpg
+
diff --git a/changelog.txt b/changelog.txt
index 3dcbb533..d8d8457e 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,15 @@
-[B]Version 5.0.38 (Feb 20, 2020)[/B]
-- See GitHub for changes
- - https://github.com/jsergio123/script.module.resolveurl/commits/master
+Version 5.1.27 (March 14, 2021)
+- Code Base Changes:
+ - None
+
+- Resolvers Added:
+ - vidmojo
+
+- Resolvers Fixed/Updated:
+ - None
+
+- Resolvers Removed:
+ - None
+
+- Languages Added/Updated:
+ - None
diff --git a/lib/default.py b/lib/default.py
index d83b6e33..271482ec 100644
--- a/lib/default.py
+++ b/lib/default.py
@@ -32,8 +32,9 @@ def __enum(**enums):
MODES = __enum(
- AUTH_PM='auth_pm', RESET_PM='reset_pm', AUTH_RD='auth_rd', RESET_RD='reset_rd', RESET_CACHE='reset_cache',
- AUTH_AD='auth_ad', RESET_AD='reset_ad', AUTH_LS='auth_ls', RESET_LS='reset_ls'
+ AUTH_PM='auth_pm', RESET_PM='reset_pm', AUTH_RD='auth_rd', RESET_RD='reset_rd',
+ AUTH_AD='auth_ad', RESET_AD='reset_ad', AUTH_LS='auth_ls', RESET_LS='reset_ls',
+ AUTH_DL='auth_dl', RESET_DL='reset_dl', RESET_CACHE='reset_cache'
)
@@ -121,6 +122,25 @@ def reset_ls():
kodi.notify(msg=kodi.i18n('ls_auth_reset'), duration=5000)
+@url_dispatcher.register(MODES.AUTH_DL)
+def auth_dl():
+ kodi.close_all()
+ kodi.sleep(500) # sleep or authorize won't work for some reason
+ from resolveurl.plugins import debrid_link
+ if debrid_link.DebridLinkResolver().authorize_resolver():
+ kodi.notify(msg=kodi.i18n('dl_authorized'), duration=5000)
+
+
+@url_dispatcher.register(MODES.RESET_DL)
+def reset_dl():
+ kodi.close_all()
+ kodi.sleep(500) # sleep or reset won't work for some reason
+ from resolveurl.plugins import debrid_link
+ dl = debrid_link.DebridLinkResolver()
+ dl.reset_authorization()
+ kodi.notify(msg=kodi.i18n('dl_auth_reset'), duration=5000)
+
+
def main(argv=None):
if sys.argv:
diff --git a/lib/resolveurl/__init__.py b/lib/resolveurl/__init__.py
index b4294332..c89095b3 100644
--- a/lib/resolveurl/__init__.py
+++ b/lib/resolveurl/__init__.py
@@ -14,33 +14,30 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
-"""
-'''
+
This module provides the main API for accessing the resolveurl features.
For most cases you probably want to use :func:`resolveurl.resolve` or
:func:`resolveurl.choose_source`.
.. seealso::
-
+
:class:`HostedMediaFile`
-'''
+"""
import re
-import urlparse
+from six.moves import urllib_parse
+import six
import sys
-import os
-import xbmc
-import xbmcvfs
-import xbmcgui
-import common
-from hmf import HostedMediaFile
+from kodi_six import xbmcvfs, xbmcgui
+from resolveurl import common
+from resolveurl.hmf import HostedMediaFile
from resolveurl.resolver import ResolveUrl
from resolveurl.plugins.__resolve_generic__ import ResolveGeneric
-from plugins import *
+from resolveurl.plugins import * # NOQA
-common.logger.log_notice('Initializing ResolveURL version: %s' % common.addon_version)
+common.logger.log_debug('Initializing ResolveURL version: %s' % common.addon_version)
MAX_SETTINGS = 75
PLUGIN_DIRS = []
@@ -49,7 +46,7 @@
def add_plugin_dirs(dirs):
global PLUGIN_DIRS
- if isinstance(dirs, basestring):
+ if isinstance(dirs, six.string_types):
PLUGIN_DIRS.append(dirs)
else:
PLUGIN_DIRS += dirs
@@ -59,7 +56,7 @@ def load_external_plugins():
for d in PLUGIN_DIRS:
common.logger.log_debug('Adding plugin path: %s' % d)
sys.path.insert(0, d)
- for filename in os.listdir(d):
+ for filename in xbmcvfs.listdir(d)[1]:
if not filename.startswith('__') and filename.endswith('.py'):
mod_name = filename[:-3]
imp = __import__(mod_name, globals(), locals())
@@ -70,8 +67,8 @@ def load_external_plugins():
def relevant_resolvers(domain=None, include_universal=None, include_popups=None, include_external=False, include_disabled=False, order_matters=False):
if include_external:
load_external_plugins()
-
- if isinstance(domain, basestring):
+
+ if isinstance(domain, six.string_types):
domain = domain.lower()
if include_universal is None:
@@ -81,7 +78,7 @@ def relevant_resolvers(domain=None, include_universal=None, include_popups=None,
include_popups = common.get_setting('allow_popups') == "true"
if include_popups is False:
common.logger.log_debug('Resolvers that require popups have been disabled')
-
+
classes = ResolveUrl.__class__.__subclasses__(ResolveUrl) + ResolveUrl.__class__.__subclasses__(ResolveGeneric)
relevant = []
for resolver in classes:
@@ -199,22 +196,23 @@ def scrape_supported(html, regex=None, host_only=False):
args:
html: the html to be scraped
- regex: an optional argument to override the default regex which is: href\s*=\s*["']([^'"]+
+ regex: an optional argument to override the default regex which is: href *= *["']([^'"]+
host_only: an optional argument if true to do only host validation vs full url validation (default False)
Returns:
a list of links scraped from the html that passed validation
"""
- if regex is None: regex = '''href\s*=\s*['"]([^'"]+)'''
+ if regex is None:
+ regex = r'''href\s*=\s*['"]([^'"]+)'''
links = []
for match in re.finditer(regex, html):
stream_url = match.group(1)
- host = urlparse.urlparse(stream_url).hostname
+ host = urllib_parse.urlparse(stream_url).hostname
if host_only:
if host is None:
continue
-
+
if host in host_cache:
if host_cache[host]:
links.append(stream_url)
@@ -223,7 +221,7 @@ def scrape_supported(html, regex=None, host_only=False):
hmf = HostedMediaFile(host=host, media_id='dummy') # use dummy media_id to allow host validation
else:
hmf = HostedMediaFile(url=stream_url)
-
+
is_valid = hmf.valid_url()
host_cache[host] = is_valid
if is_valid:
@@ -254,7 +252,7 @@ def _update_settings_xml():
all settings for this addon and its plugins.
"""
try:
- os.makedirs(os.path.dirname(common.settings_file))
+ xbmcvfs.mkdirs(common.settings_path)
except OSError:
pass
@@ -301,17 +299,26 @@ def _update_settings_xml():
new_xml.append('')
try:
- with open(common.settings_file, 'r') as f:
- old_xml = f.read()
+ if six.PY3:
+ with open(common.settings_file, 'r', encoding='utf-8') as f:
+ old_xml = f.read()
+ else:
+ with open(common.settings_file, 'r') as f:
+ old_xml = f.read()
except:
- old_xml = ''
+ old_xml = u''
+ old_xml = six.ensure_text(old_xml)
- new_xml = '\n'.join(new_xml)
+ new_xml = six.ensure_text('\n'.join(new_xml))
if old_xml != new_xml:
common.logger.log_debug('Updating Settings XML')
try:
- with open(common.settings_file, 'w') as f:
- f.write(new_xml)
+ if six.PY3:
+ with open(common.settings_file, 'w', encoding='utf-8') as f:
+ f.write(new_xml)
+ else:
+ with open(common.settings_file, 'w') as f:
+ f.write(new_xml.encode('utf8'))
except:
raise
else:
diff --git a/lib/resolveurl/common.py b/lib/resolveurl/common.py
index 71cc05f0..afac16aa 100644
--- a/lib/resolveurl/common.py
+++ b/lib/resolveurl/common.py
@@ -17,11 +17,11 @@
"""
import os
import hashlib
-from lib import log_utils
-from lib.net import Net, get_ua # @UnusedImport
-from lib import cache # @UnusedImport
-from lib import kodi
-from lib import pyaes
+from resolveurl.lib import log_utils
+from resolveurl.lib.net import Net, get_ua # @UnusedImport # NOQA
+from resolveurl.lib import cache # @UnusedImport # NOQA
+from resolveurl.lib import kodi
+from resolveurl.lib import pyaes
from random import choice
logger = log_utils.Logger.get_logger()
@@ -29,6 +29,7 @@
plugins_path = os.path.join(addon_path, 'lib', 'resolveurl', 'plugins')
profile_path = kodi.translate_path(kodi.get_profile())
settings_file = os.path.join(addon_path, 'resources', 'settings.xml')
+settings_path = os.path.join(addon_path, 'resources')
addon_version = kodi.get_version()
get_setting = kodi.get_setting
set_setting = kodi.set_setting
@@ -40,28 +41,31 @@
VIDEO_FORMATS = ['.aac', '.asf', '.avi', '.flv', '.m4a', '.m4v', '.mka', '.mkv', '.mp4', '.mpeg', '.nut', '.ogg']
# RAND_UA = get_ua()
-FF_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0'
-OPERA_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36 OPR/55.0.2994.37'
-IOS_USER_AGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1'
-ANDROID_USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
-EDGE_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134 '
-CHROME_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
-SAFARI_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.1 Safari/605.1.15'
+IE_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
+FF_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'
+OPERA_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 OPR/67.0.3575.97'
+IOS_USER_AGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Mobile/15E148 Safari/604.1'
+ANDROID_USER_AGENT = 'Mozilla/5.0 (Linux; Android 9; SM-G973F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36'
+EDGE_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
+CHROME_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4136.7 Safari/537.36'
+SAFARI_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Safari/605.1.15'
SMR_USER_AGENT = 'ResolveURL for Kodi/%s' % (addon_version)
# Quick hack till I decide how to handle this
_USER_AGENTS = [FF_USER_AGENT, OPERA_USER_AGENT, EDGE_USER_AGENT, CHROME_USER_AGENT, SAFARI_USER_AGENT]
RAND_UA = choice(_USER_AGENTS)
+
def log_file_hash(path):
try:
with open(path, 'r') as f:
py_data = f.read()
except:
py_data = ''
-
+
logger.log('%s hash: %s' % (os.path.basename(path), hashlib.md5(py_data).hexdigest()))
+
def file_length(py_path, key=''):
try:
with open(py_path, 'r') as f:
@@ -71,9 +75,10 @@ def file_length(py_path, key=''):
old_len = len(old_py)
except:
old_len = -1
-
+
return old_len
+
def decrypt_py(cipher_text, key):
if cipher_text:
try:
@@ -92,6 +97,7 @@ def decrypt_py(cipher_text, key):
return plain_text
+
def encrypt_py(plain_text, key):
if plain_text:
try:
diff --git a/lib/resolveurl/hmf.py b/lib/resolveurl/hmf.py
index 1f248e29..c2d3e111 100644
--- a/lib/resolveurl/hmf.py
+++ b/lib/resolveurl/hmf.py
@@ -15,10 +15,9 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
"""
-import urllib2
-import urlparse
import re
-import urllib
+import six
+from six.moves import urllib_error, urllib_request, urllib_parse
import traceback
import resolveurl
from resolveurl import common
@@ -114,14 +113,15 @@ def __get_resolvers(self, include_disabled, include_universal, include_popups):
resolver_cache[klass] = klass()
resolvers.append(resolver_cache[klass])
return resolvers
-
+
def __top_domain(self, url):
- elements = urlparse.urlparse(url)
+ elements = urllib_parse.urlparse(url)
domain = elements.netloc or elements.path
domain = domain.split('@')[-1].split(':')[0]
- regex = "(?:www\.)?([\w\-]*\.[\w\-]{2,5}(?:\.[\w\-]{2,5})?)$"
+ regex = r"(?:www\.)?([\w\-]*\.[\w\-]{2,5}(?:\.[\w\-]{2,5})?)$"
res = re.search(regex, domain)
- if res: domain = res.group(1)
+ if res:
+ domain = res.group(1)
domain = domain.lower()
return domain
@@ -147,9 +147,10 @@ def get_resolvers(self, validated=False):
"""
Returns the list of resolvers of this :class:`HostedMediaFile`.
"""
- if validated: self.valid_url()
+ if validated:
+ self.valid_url()
return self.__resolvers
-
+
def resolve(self, include_universal=True, allow_popups=True):
"""
Resolves this :class:`HostedMediaFile` to a media URL.
@@ -184,13 +185,14 @@ def resolve(self, include_universal=True, allow_popups=True):
resolver.login()
self._host, self._media_id = resolver.get_host_and_id(self._url)
stream_url = resolver.get_media_url(self._host, self._media_id)
- if stream_url.startswith("//"): stream_url = 'http:%s' % stream_url
- if stream_url and self.__test_stream(stream_url):
+ if stream_url.startswith("//"):
+ stream_url = 'http:%s' % stream_url
+ if stream_url and test_stream(stream_url):
self.__resolvers = [resolver] # Found a working resolver, throw out the others
self._valid_url = True
return stream_url
except Exception as e:
- url = self._url.encode('utf-8') if isinstance(self._url, unicode) else self._url
+ url = self._url.encode('utf-8') if isinstance(self._url, six.text_type) and six.PY2 else self._url
common.logger.log_error('%s Error - From: %s Link: %s: %s' % (type(e).__name__, resolver.name, url, e))
if resolver == self.__resolvers[-1]:
common.logger.log_debug(traceback.format_exc())
@@ -222,70 +224,12 @@ def valid_url(self):
if resolver.valid_url(self._url, self._domain):
resolvers.append(resolver)
except:
- # print sys.exc_info()
continue
-
+
self.__resolvers = resolvers
self._valid_url = True if resolvers else False
return self._valid_url
- def __test_stream(self, stream_url):
- """
- Returns True if the stream_url gets a non-failure http status (i.e. <400) back from the server
- otherwise return False
-
- Intended to catch stream urls returned by resolvers that would fail to playback
- """
- # parse_qsl doesn't work because it splits elements by ';' which can be in a non-quoted UA
- try: headers = dict([item.split('=') for item in (stream_url.split('|')[1]).split('&')])
- except: headers = {}
- for header in headers:
- headers[header] = urllib.unquote_plus(headers[header])
- common.logger.log_debug('Setting Headers on UrlOpen: %s' % headers)
-
- try:
- import ssl
- ssl_context = ssl.create_default_context()
- ssl_context.check_hostname = False
- ssl_context.verify_mode = ssl.CERT_NONE
- opener = urllib2.build_opener(urllib2.HTTPSHandler(context=ssl_context))
- urllib2.install_opener(opener)
- except:
- pass
-
- try:
- msg = ''
- request = urllib2.Request(stream_url.split('|')[0], headers=headers)
- # only do a HEAD request. gujal
- request.get_method = lambda : 'HEAD'
- # set urlopen timeout to 15 seconds
- http_code = urllib2.urlopen(request, timeout=15).getcode()
- except urllib2.URLError as e:
- if hasattr(e, 'reason'):
- # treat an unhandled url type as success
- if 'unknown url type' in str(e.reason).lower():
- return True
- else:
- msg = e.reason
-
- if isinstance(e, urllib2.HTTPError):
- http_code = e.code
- else:
- http_code = 600
- if not msg: msg = str(e)
- except Exception as e:
- http_code = 601
- msg = str(e)
- if msg == "''":
- http_code = 504
-
- # added this log line for now so that we can catch any logs on streams that are rejected due to test_stream failures
- # we can remove it once we are sure this works reliably
- if int(http_code) >= 400 and int(http_code) != 504:
- common.logger.log_warning('Stream UrlOpen Failed: Url: %s HTTP Code: %s Msg: %s' % (stream_url, http_code, msg))
-
- return int(http_code) < 400 or int(http_code) == 504
-
def __nonzero__(self):
if self._valid_url is None:
return self.valid_url()
@@ -297,3 +241,68 @@ def __str__(self):
def __repr__(self):
return self.__str__()
+
+
+def test_stream(stream_url):
+ """
+ Returns True if the stream_url gets a non-failure http status (i.e. <400) back from the server
+ otherwise return False
+
+ Intended to catch stream urls returned by resolvers that would fail to playback
+ """
+ # parse_qsl doesn't work because it splits elements by ';' which can be in a non-quoted UA
+ try:
+ headers = dict([item.split('=') for item in (stream_url.split('|')[1]).split('&')])
+ except:
+ headers = {}
+ for header in headers:
+ headers[header] = urllib_parse.unquote_plus(headers[header])
+ common.logger.log_debug('Setting Headers on UrlOpen: %s' % headers)
+
+ try:
+ import ssl
+ ssl_context = ssl.create_default_context()
+ ssl_context.check_hostname = False
+ ssl_context.verify_mode = ssl.CERT_NONE
+ opener = urllib_request.build_opener(urllib_request.HTTPSHandler(context=ssl_context))
+ urllib_request.install_opener(opener)
+ except:
+ pass
+
+ try:
+ msg = ''
+ request = urllib_request.Request(stream_url.split('|')[0], headers=headers)
+ # only do a HEAD request. gujal
+ request.get_method = lambda: 'HEAD'
+ # set urlopen timeout to 15 seconds
+ http_code = urllib_request.urlopen(request, timeout=15).getcode()
+ except urllib_error.HTTPError as e:
+        # e is always an HTTPError in this branch, so no isinstance check is
+        # needed; treat 405 (Method Not Allowed) as success because many hosts
+        # reject HEAD requests for otherwise playable streams.
+        http_code = e.code
+        if http_code == 405:
+            http_code = 200
+ except urllib_error.URLError as e:
+ http_code = 500
+ if hasattr(e, 'reason'):
+ # treat an unhandled url type as success
+ if 'unknown url type' in str(e.reason).lower():
+ return True
+ else:
+ msg = e.reason
+ if not msg:
+ msg = str(e)
+
+ except Exception as e:
+ http_code = 601
+ msg = str(e)
+ if msg == "''":
+ http_code = 504
+
+ # added this log line for now so that we can catch any logs on streams that are rejected due to test_stream failures
+ # we can remove it once we are sure this works reliably
+ if int(http_code) >= 400 and int(http_code) != 504:
+ common.logger.log_warning('Stream UrlOpen Failed: Url: %s HTTP Code: %s Msg: %s' % (stream_url, http_code, msg))
+
+ return int(http_code) < 400 or int(http_code) == 504
diff --git a/lib/resolveurl/lib/CustomProgressDialog.py b/lib/resolveurl/lib/CustomProgressDialog.py
index db05761e..fe2ab1dd 100644
--- a/lib/resolveurl/lib/CustomProgressDialog.py
+++ b/lib/resolveurl/lib/CustomProgressDialog.py
@@ -16,38 +16,48 @@
along with this program. If not, see .
"""
import xbmcgui
-import kodi
-import log_utils
+import xbmcaddon
+import six
+from resolveurl.lib import log_utils
logger = log_utils.Logger.get_logger(__name__)
-
+addon = xbmcaddon.Addon('script.module.resolveurl')
DIALOG_XML = 'ProgressDialog.xml'
+
class ProgressDialog(object):
dialog = None
+ def get_path(self):
+ return addon.getAddonInfo('path') if six.PY3 else addon.getAddonInfo('path').decode('utf-8')
+
def create(self, heading, line1='', line2='', line3=''):
- try: self.dialog = ProgressDialog.Window(DIALOG_XML, kodi.get_setting('xml_folder'))
- except: self.dialog = ProgressDialog.Window(DIALOG_XML, kodi.get_path())
+ try:
+ self.dialog = ProgressDialog.Window(DIALOG_XML, addon.getSetting('xml_folder'))
+ except:
+ self.dialog = ProgressDialog.Window(DIALOG_XML, self.get_path())
self.dialog.show()
self.dialog.setHeading(heading)
self.dialog.setLine1(line1)
self.dialog.setLine2(line2)
self.dialog.setLine3(line3)
-
+
def update(self, percent, line1='', line2='', line3=''):
if self.dialog is not None:
self.dialog.setProgress(percent)
- if line1: self.dialog.setLine1(line1)
- if line2: self.dialog.setLine2(line2)
- if line3: self.dialog.setLine3(line3)
-
+ if line1:
+ self.dialog.setLine1(line1)
+ if line2:
+ self.dialog.setLine2(line2)
+ if line3:
+ self.dialog.setLine3(line3)
+
def iscanceled(self):
if self.dialog is not None:
return self.dialog.cancel
else:
return False
-
+
def close(self):
if self.dialog is not None:
self.dialog.close()
@@ -63,44 +73,44 @@ class Window(xbmcgui.WindowXMLDialog):
ACTION_BACK = 92
CANCEL_BUTTON = 200
cancel = False
-
+
def onInit(self):
pass
-
+
def onAction(self, action):
# logger.log('Action: %s' % (action.getId()), log_utils.LOGDEBUG, COMPONENT)
if action == self.ACTION_PREVIOUS_MENU or action == self.ACTION_BACK:
self.cancel = True
self.close()
-
+
def onControl(self, control):
# logger.log('onControl: %s' % (control), log_utils.LOGDEBUG, COMPONENT)
pass
-
+
def onFocus(self, control):
# logger.log('onFocus: %s' % (control), log_utils.LOGDEBUG, COMPONENT)
pass
-
+
def onClick(self, control):
# logger.log('onClick: %s' % (control), log_utils.LOGDEBUG, COMPONENT)
if control == self.CANCEL_BUTTON:
self.cancel = True
self.close()
-
+
def setHeading(self, heading):
self.setLabel(self.HEADING_CTRL, heading)
-
+
def setProgress(self, progress):
self.getControl(self.PROGRESS_CTRL).setPercent(progress)
-
+
def setLine1(self, line):
self.setLabel(self.LINE1_CTRL, line)
-
+
def setLine2(self, line):
self.setLabel(self.LINE2_CTRL, line)
-
+
def setLine3(self, line):
self.setLabel(self.LINE3_CTRL, line)
-
+
def setLabel(self, ctrl, line):
self.getControl(ctrl).setLabel(line)
diff --git a/lib/resolveurl/lib/cache.py b/lib/resolveurl/lib/cache.py
index a07277a6..6c678dbb 100644
--- a/lib/resolveurl/lib/cache.py
+++ b/lib/resolveurl/lib/cache.py
@@ -16,13 +16,14 @@
along with this program. If not, see .
"""
import functools
-import log_utils
+from resolveurl.lib import log_utils
import time
import pickle
import hashlib
import os
import shutil
-import kodi
+import six
+from resolveurl.lib import kodi
logger = log_utils.Logger.get_logger(__name__)
@@ -32,9 +33,10 @@
os.makedirs(cache_path)
except Exception as e:
logger.log('Failed to create cache: %s: %s' % (cache_path, e), log_utils.LOGWARNING)
-
+
cache_enabled = kodi.get_setting('use_cache') == 'true'
-
+
+
def reset_cache():
try:
shutil.rmtree(cache_path)
@@ -42,39 +44,58 @@ def reset_cache():
except Exception as e:
logger.log('Failed to Reset Cache: %s' % (e), log_utils.LOGWARNING)
return False
-
+
+
def _get_func(name, args=None, kwargs=None, cache_limit=1):
- if not cache_enabled: return False, None
+ if not cache_enabled:
+ return False, None
now = time.time()
max_age = now - (cache_limit * 60 * 60)
- if args is None: args = []
- if kwargs is None: kwargs = {}
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
full_path = os.path.join(cache_path, _get_filename(name, args, kwargs))
if os.path.exists(full_path):
mtime = os.path.getmtime(full_path)
if mtime >= max_age:
- with open(full_path, 'r') as f:
- pickled_result = f.read()
- # logger.log('Returning cached result: |%s|%s|%s| - modtime: %s max_age: %s age: %ss' % (name, args, kwargs, mtime, max_age, now - mtime), log_utils.LOGDEBUG)
+ if six.PY2:
+ with open(full_path, 'r') as f:
+ pickled_result = f.read()
+ else:
+ with open(full_path, 'rb') as f:
+ pickled_result = f.read()
return True, pickle.loads(pickled_result)
-
+
return False, None
-
+
+
def _save_func(name, args=None, kwargs=None, result=None):
try:
- if args is None: args = []
- if kwargs is None: kwargs = {}
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
pickled_result = pickle.dumps(result)
full_path = os.path.join(cache_path, _get_filename(name, args, kwargs))
- with open(full_path, 'w') as f:
- f.write(pickled_result)
+ if six.PY2:
+ with open(full_path, 'w') as f:
+ f.write(pickled_result)
+ else:
+ with open(full_path, 'wb') as f:
+ f.write(pickled_result)
except Exception as e:
logger.log('Failure during cache write: %s' % (e), log_utils.LOGWARNING)
+
def _get_filename(name, args, kwargs):
- arg_hash = hashlib.md5(name).hexdigest() + hashlib.md5(str(args)).hexdigest() + hashlib.md5(str(kwargs)).hexdigest()
+ if six.PY2:
+ arg_hash = hashlib.md5(name).hexdigest() + hashlib.md5(str(args)).hexdigest() + hashlib.md5(str(kwargs)).hexdigest()
+ else:
+ arg_hash = hashlib.md5(name.encode('utf8')).hexdigest() + hashlib.md5(str(args).encode('utf8')).hexdigest() + hashlib.md5(str(kwargs).encode('utf8')).hexdigest()
return arg_hash
+
def cache_method(cache_limit):
def wrap(func):
@functools.wraps(func)
@@ -97,6 +118,7 @@ def memoizer(*args, **kwargs):
return memoizer
return wrap
+
# do not use this with instance methods the self parameter will cause args to never match
def cache_function(cache_limit):
def wrap(func):
diff --git a/lib/resolveurl/lib/kodi.py b/lib/resolveurl/lib/kodi.py
index f9132218..3b529434 100644
--- a/lib/resolveurl/lib/kodi.py
+++ b/lib/resolveurl/lib/kodi.py
@@ -15,19 +15,15 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
"""
-import xbmcaddon
-import xbmcplugin
-import xbmcgui
-import xbmc
-import xbmcvfs
-import urllib
-import urlparse
+from kodi_six import xbmc, xbmcgui, xbmcplugin, xbmcaddon, xbmcvfs
+from six.moves import urllib_parse
+import six
import sys
import os
import re
import time
-import strings
-import CustomProgressDialog
+from resolveurl.lib import strings
+from resolveurl.lib import CustomProgressDialog
addon = xbmcaddon.Addon('script.module.resolveurl')
get_setting = addon.getSetting
@@ -37,19 +33,19 @@
def get_path():
- return addon.getAddonInfo('path').decode('utf-8')
+ return addon.getAddonInfo('path')
def get_profile():
- return addon.getAddonInfo('profile').decode('utf-8')
+ return addon.getAddonInfo('profile')
def translate_path(path):
- return xbmc.translatePath(path).decode('utf-8')
+ return xbmcvfs.translatePath(path) if six.PY3 else xbmc.translatePath(path)
def set_setting(id, value):
- if not isinstance(value, basestring):
+ if not isinstance(value, six.string_types):
value = str(value)
addon.setSetting(id, value)
@@ -130,7 +126,7 @@ def get_keyboard_new(heading, default='', hide_input=False):
def i18n(string_id):
try:
- return addon.getLocalizedString(strings.STRINGS[string_id]).encode('utf-8', 'ignore')
+ return six.ensure_str(addon.getLocalizedString(strings.STRINGS[string_id]))
except Exception as e:
_log('Failed String Lookup: %s (%s)' % (string_id, e))
return string_id
@@ -138,12 +134,12 @@ def i18n(string_id):
def get_plugin_url(queries):
try:
- query = urllib.urlencode(queries)
+ query = urllib_parse.urlencode(queries)
except UnicodeEncodeError:
for k in queries:
- if isinstance(queries[k], unicode):
+ if isinstance(queries[k], six.text_type) and six.PY2:
queries[k] = queries[k].encode('utf-8')
- query = urllib.urlencode(queries)
+ query = urllib_parse.urlencode(queries)
return sys.argv[0] + '?' + query
@@ -162,7 +158,8 @@ def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable
def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
- if menu_items is None: menu_items = []
+ if menu_items is None:
+ menu_items = []
if is_folder is None:
is_folder = False if is_playable else True
@@ -172,7 +169,8 @@ def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, to
playable = 'true' if is_playable else 'false'
liz_url = get_plugin_url(queries)
- if fanart: list_item.setProperty('fanart_image', fanart)
+ if fanart:
+ list_item.setProperty('fanart_image', fanart)
list_item.setInfo('video', {'title': list_item.getLabel()})
list_item.setProperty('isPlayable', playable)
list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
@@ -181,8 +179,9 @@ def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, to
def parse_query(query):
q = {'mode': 'main'}
- if query.startswith('?'): query = query[1:]
- queries = urlparse.parse_qs(query)
+ if query.startswith('?'):
+ query = query[1:]
+ queries = urllib_parse.parse_qs(query)
for key in queries:
if len(queries[key]) == 1:
q[key] = queries[key][0]
@@ -192,8 +191,10 @@ def parse_query(query):
def notify(header=None, msg='', duration=2000, sound=None):
- if header is None: header = get_name()
- if sound is None: sound = get_setting('mute_notifications') == 'false'
+ if header is None:
+ header = get_name()
+ if sound is None:
+ sound = get_setting('mute_notifications') == 'false'
icon_path = os.path.join(get_path(), 'icon.png')
try:
xbmcgui.Dialog().notification(header, msg, icon_path, duration, sound)
@@ -231,10 +232,10 @@ def get_current_view():
class WorkingDialog(object):
def __init__(self):
xbmc.executebuiltin('ActivateWindow(busydialog)')
-
+
def __enter__(self):
return self
-
+
def __exit__(self, type, value, traceback):
xbmc.executebuiltin('Dialog.Close(busydialog)')
@@ -245,6 +246,9 @@ def has_addon(addon_id):
class ProgressDialog(object):
def __init__(self, heading, line1='', line2='', line3='', background=False, active=True, timer=0):
+ self.line1 = line1
+ self.line2 = line2
+ self.line3 = line3
self.begin = time.time()
self.timer = timer
self.background = background
@@ -265,50 +269,77 @@ def __create_dialog(self, line1, line2, line3):
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
- pd.create(self.heading, line1, line2, line3)
+ if six.PY2:
+ pd.create(self.heading, line1, line2, line3)
+ else:
+ pd.create(self.heading,
+ line1 + '\n'
+ + line2 + '\n'
+ + line3)
return pd
-
+
def __enter__(self):
return self
-
+
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
del self.pd
-
+
def is_canceled(self):
if self.pd is not None and not self.background:
return self.pd.iscanceled()
else:
return False
-
+
def update(self, percent, line1='', line2='', line3=''):
+ if not line1:
+ line1 = self.line1
+ if not line2:
+ line2 = self.line2
+ if not line3:
+ line3 = self.line3
if self.pd is None and self.timer and (time.time() - self.begin) >= self.timer:
self.pd = self.__create_dialog(line1, line2, line3)
-
+
if self.pd is not None:
if self.background:
msg = line1 + line2 + line3
self.pd.update(percent, self.heading, msg)
else:
- self.pd.update(percent, line1, line2, line3)
+ if six.PY2:
+ self.pd.update(percent, line1, line2, line3)
+ else:
+ self.pd.update(percent,
+ line1 + '\n'
+ + line2 + '\n'
+ + line3)
class CountdownDialog(object):
__INTERVALS = 5
-
+
def __init__(self, heading, line1='', line2='', line3='', active=True, countdown=60, interval=5):
self.heading = heading
self.countdown = countdown
self.interval = interval
+ self.line1 = line1
+ self.line2 = line2
self.line3 = line3
if active:
if xbmc.getCondVisibility('Window.IsVisible(progressdialog)'):
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
- if not self.line3: line3 = 'Expires in: %s seconds' % countdown
- pd.create(self.heading, line1, line2, line3)
+ if not self.line3:
+ line3 = 'Expires in: %s seconds' % countdown
+ if six.PY2:
+ pd.create(self.heading, line1, line2, line3)
+ else:
+ pd.create(self.heading,
+ line1 + '\n'
+ + line2 + '\n'
+ + line3)
pd.update(100)
self.pd = pd
else:
@@ -316,43 +347,59 @@ def __init__(self, heading, line1='', line2='', line3='', active=True, countdown
def __enter__(self):
return self
-
+
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
del self.pd
-
+
def start(self, func, args=None, kwargs=None):
- if args is None: args = []
- if kwargs is None: kwargs = {}
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
result = func(*args, **kwargs)
if result:
return result
-
+
if self.pd is not None:
start = time.time()
expires = time_left = self.countdown
interval = self.interval
while time_left > 0:
for _ in range(CountdownDialog.__INTERVALS):
- sleep(interval * 1000 / CountdownDialog.__INTERVALS)
- if self.is_canceled(): return
+ sleep(int(interval * 1000 / CountdownDialog.__INTERVALS))
+ if self.is_canceled():
+ return
time_left = expires - int(time.time() - start)
- if time_left < 0: time_left = 0
- progress = time_left * 100 / expires
+ if time_left < 0:
+ time_left = 0
+ progress = int(time_left * 100 / expires)
line3 = 'Expires in: %s seconds' % time_left if not self.line3 else ''
self.update(progress, line3=line3)
-
+
result = func(*args, **kwargs)
if result:
return result
-
+
def is_canceled(self):
if self.pd is None:
return False
else:
return self.pd.iscanceled()
-
+
def update(self, percent, line1='', line2='', line3=''):
+ if not line1:
+ line1 = self.line1
+ if not line2:
+ line2 = self.line2
+ if not line3:
+ line3 = self.line3
if self.pd is not None:
- self.pd.update(percent, line1, line2, line3)
+ if six.PY2:
+ self.pd.update(percent, line1, line2, line3)
+ else:
+ self.pd.update(percent,
+ line1 + '\n'
+ + line2 + '\n'
+ + line3)
diff --git a/lib/resolveurl/lib/log_utils.py b/lib/resolveurl/lib/log_utils.py
index e18b0f45..1d3d46dd 100644
--- a/lib/resolveurl/lib/log_utils.py
+++ b/lib/resolveurl/lib/log_utils.py
@@ -18,47 +18,55 @@
import json
import xbmc
import xbmcaddon
-from xbmc import LOGDEBUG, LOGERROR, LOGFATAL, LOGINFO, LOGNONE, LOGNOTICE, LOGSEVERE, LOGWARNING # @UnusedImport
+import six
+
+LOGDEBUG = xbmc.LOGDEBUG
+LOGERROR = xbmc.LOGERROR
+LOGWARNING = xbmc.LOGWARNING
+LOGINFO = xbmc.LOGINFO if six.PY3 else xbmc.LOGNOTICE
addonsmr = xbmcaddon.Addon('script.module.resolveurl')
+
def execute_jsonrpc(command):
- if not isinstance(command, basestring):
+ if not isinstance(command, six.string_types):
command = json.dumps(command)
response = xbmc.executeJSONRPC(command)
return json.loads(response)
+
def _is_debugging():
command = {'jsonrpc': '2.0', 'id': 1, 'method': 'Settings.getSettings', 'params': {'filter': {'section': 'system', 'category': 'logging'}}}
js_data = execute_jsonrpc(command)
for item in js_data.get('result', {}).get('settings', {}):
if item['id'] == 'debug.showloginfo':
return item['value']
-
+
return False
+
class Logger(object):
__loggers = {}
__name = addonsmr.getAddonInfo('name')
__addon_debug = addonsmr.getSetting('addon_debug') == 'true'
__debug_on = _is_debugging()
__disabled = set()
-
+
@staticmethod
def get_logger(name=None):
if name not in Logger.__loggers:
Logger.__loggers[name] = Logger()
-
+
return Logger.__loggers[name]
-
+
def disable(self):
if self not in Logger.__disabled:
Logger.__disabled.add(self)
-
+
def enable(self):
if self in Logger.__disabled:
Logger.__disabled.remove(self)
-
+
def log(self, msg, level=LOGDEBUG):
# if debug isn't on, skip disabled loggers unless addon_debug is on
if not self.__debug_on:
@@ -66,28 +74,30 @@ def log(self, msg, level=LOGDEBUG):
return
elif level == LOGDEBUG:
if self.__addon_debug:
- level = LOGNOTICE
+ level = LOGINFO
else:
return
-
+
try:
- if isinstance(msg, unicode):
+ if isinstance(msg, six.text_type) and six.PY2:
msg = '%s (ENCODED)' % (msg.encode('utf-8'))
-
+
xbmc.log('%s: %s' % (self.__name, msg), level)
-
+
except Exception as e:
- try: xbmc.log('Logging Failure: %s' % (e), level)
- except: pass # just give up
-
+ try:
+ xbmc.log('Logging Failure: %s' % (e), level)
+ except:
+ pass # just give up
+
def log_debug(self, msg):
self.log(msg, level=LOGDEBUG)
-
+
def log_notice(self, msg):
- self.log(msg, level=LOGNOTICE)
-
+ self.log(msg, level=LOGINFO)
+
def log_warning(self, msg):
self.log(msg, level=LOGWARNING)
-
+
def log_error(self, msg):
self.log(msg, level=LOGERROR)
diff --git a/lib/resolveurl/lib/net.py b/lib/resolveurl/lib/net.py
index dbe2a30b..9ec14007 100644
--- a/lib/resolveurl/lib/net.py
+++ b/lib/resolveurl/lib/net.py
@@ -1,4 +1,4 @@
-'''
+"""
common XBMC Module
Copyright (C) 2011 t0mm0
@@ -14,23 +14,23 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
-'''
+"""
import random
-import cookielib
+from six.moves import http_cookiejar
import gzip
import re
-import StringIO
-import urllib
-import urllib2
+import json
+import six
+from six.moves import urllib_request, urllib_parse
import socket
import time
-import kodi
+from resolveurl.lib import kodi
# Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(10)
BR_VERS = [
- ['%s.0' % i for i in xrange(18, 50)],
+ ['%s.0' % i for i in range(18, 50)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80', '48.0.2564.116', '49.0.2623.112', '50.0.2661.86'],
@@ -42,9 +42,13 @@
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko',
'Mozilla/5.0 (compatible; MSIE {br_ver}; {win_ver}{feature}; Trident/6.0)']
+
+
def get_ua():
- try: last_gen = int(kodi.get_setting('last_ua_create'))
- except: last_gen = 0
+ try:
+ last_gen = int(kodi.get_setting('last_ua_create'))
+ except:
+ last_gen = 0
if not kodi.get_setting('current_ua') or last_gen < (time.time() - (7 * 24 * 60 * 60)):
index = random.randrange(len(RAND_UAS))
versions = {'win_ver': random.choice(WIN_VERS), 'feature': random.choice(FEATURES), 'br_ver': random.choice(BR_VERS[index])}
@@ -56,8 +60,9 @@ def get_ua():
user_agent = kodi.get_setting('current_ua')
return user_agent
+
class Net:
- '''
+ """
This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
@@ -68,15 +73,15 @@ class Net:
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
- '''
+ """
- _cj = cookielib.LWPCookieJar()
+ _cj = http_cookiejar.LWPCookieJar()
_proxy = None
_user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='', ssl_verify=True, http_debug=False):
- '''
+ """
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
@@ -89,7 +94,7 @@ def __init__(self, cookie_file='', proxy='', user_agent='', ssl_verify=True, htt
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
- '''
+ """
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
@@ -101,13 +106,13 @@ def __init__(self, cookie_file='', proxy='', user_agent='', ssl_verify=True, htt
self._update_opener()
def set_cookies(self, cookie_file):
- '''
+ """
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
- '''
+ """
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
@@ -116,59 +121,59 @@ def set_cookies(self, cookie_file):
return False
def get_cookies(self, as_dict=False):
- '''Returns A dictionary containing all cookie information by domain.'''
+ """Returns A dictionary containing all cookie information by domain."""
if as_dict:
return dict((cookie.name, cookie.value) for cookie in self._cj)
else:
return self._cj._cookies
def save_cookies(self, cookie_file):
- '''
+ """
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
- '''
+ """
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
- '''
+ """
Args:
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
- '''
+ """
self._proxy = proxy
self._update_opener()
def get_proxy(self):
- '''Returns string containing proxy details.'''
+ """Returns string containing proxy details."""
return self._proxy
def set_user_agent(self, user_agent):
- '''
+ """
Args:
user_agent (str): String to use as the User Agent header.
- '''
+ """
self._user_agent = user_agent
def get_user_agent(self):
- '''Returns user agent string.'''
+ """Returns user agent string."""
return self._user_agent
def _update_opener(self):
- '''
+ """
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
- '''
- handlers = [urllib2.HTTPCookieProcessor(self._cj), urllib2.HTTPBasicAuthHandler()]
+ """
+ handlers = [urllib_request.HTTPCookieProcessor(self._cj), urllib_request.HTTPBasicAuthHandler()]
if self._http_debug:
- handlers += [urllib2.HTTPHandler(debuglevel=1)]
+ handlers += [urllib_request.HTTPHandler(debuglevel=1)]
else:
- handlers += [urllib2.HTTPHandler()]
+ handlers += [urllib_request.HTTPHandler()]
if self._proxy:
- handlers += [urllib2.ProxyHandler({'http': self._proxy})]
+ handlers += [urllib_request.ProxyHandler({'http': self._proxy})]
try:
import platform
@@ -183,17 +188,17 @@ def _update_opener(self):
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
if self._http_debug:
- handlers += [urllib2.HTTPSHandler(context=ctx, debuglevel=1)]
+ handlers += [urllib_request.HTTPSHandler(context=ctx, debuglevel=1)]
else:
- handlers += [urllib2.HTTPSHandler(context=ctx)]
+ handlers += [urllib_request.HTTPSHandler(context=ctx)]
except:
pass
- opener = urllib2.build_opener(*handlers)
- urllib2.install_opener(opener)
+ opener = urllib_request.build_opener(*handlers)
+ urllib_request.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
- '''
+ """
Perform an HTTP GET request.
Args:
@@ -209,11 +214,11 @@ def http_GET(self, url, headers={}, compression=True):
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
- '''
+ """
return self._fetch(url, headers=headers, compression=compression)
- def http_POST(self, url, form_data, headers={}, compression=True):
- '''
+ def http_POST(self, url, form_data, headers={}, compression=True, jdata=False):
+ """
Perform an HTTP POST request.
Args:
@@ -231,11 +236,11 @@ def http_POST(self, url, form_data, headers={}, compression=True):
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
- '''
- return self._fetch(url, form_data, headers=headers, compression=compression)
+ """
+ return self._fetch(url, form_data, headers=headers, compression=compression, jdata=jdata)
def http_HEAD(self, url, headers={}):
- '''
+ """
Perform an HTTP HEAD request.
Args:
@@ -248,17 +253,17 @@ def http_HEAD(self, url, headers={}):
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
- '''
- request = urllib2.Request(url)
+ """
+ request = urllib_request.Request(url)
request.get_method = lambda: 'HEAD'
request.add_header('User-Agent', self._user_agent)
for key in headers:
request.add_header(key, headers[key])
- response = urllib2.urlopen(request)
+ response = urllib_request.urlopen(request)
return HttpResponse(response)
def http_DELETE(self, url, headers={}):
- '''
+ """
Perform an HTTP DELETE request.
Args:
@@ -271,17 +276,17 @@ def http_DELETE(self, url, headers={}):
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
- '''
- request = urllib2.Request(url)
+ """
+ request = urllib_request.Request(url)
request.get_method = lambda: 'DELETE'
request.add_header('User-Agent', self._user_agent)
for key in headers:
request.add_header(key, headers[key])
- response = urllib2.urlopen(request)
+ response = urllib_request.urlopen(request)
return HttpResponse(response)
- def _fetch(self, url, form_data={}, headers={}, compression=True):
- '''
+ def _fetch(self, url, form_data={}, headers={}, compression=True, jdata=False):
+ """
Perform an HTTP GET or POST request.
Args:
@@ -300,44 +305,52 @@ def _fetch(self, url, form_data={}, headers={}, compression=True):
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
- '''
- req = urllib2.Request(url)
+ """
+ req = urllib_request.Request(url)
if form_data:
- if isinstance(form_data, basestring):
+ if jdata:
+ form_data = json.dumps(form_data)
+ elif isinstance(form_data, six.string_types):
form_data = form_data
else:
- form_data = urllib.urlencode(form_data, True)
- req = urllib2.Request(url, form_data)
+ form_data = urllib_parse.urlencode(form_data, True)
+ form_data = form_data.encode('utf-8') if six.PY3 else form_data
+ req = urllib_request.Request(url, form_data)
req.add_header('User-Agent', self._user_agent)
for key in headers:
req.add_header(key, headers[key])
if compression:
req.add_header('Accept-Encoding', 'gzip')
- req.add_unredirected_header('Host', req.get_host())
- response = urllib2.urlopen(req)
+ if jdata:
+ req.add_header('Content-Type', 'application/json')
+ host = req.host if six.PY3 else req.get_host()
+ req.add_unredirected_header('Host', host)
+ response = urllib_request.urlopen(req, timeout=15)
return HttpResponse(response)
+
class HttpResponse:
- '''
+ """
This class represents a resoponse from an HTTP request.
- The content is examined and every attempt is made to properly encode it to
- Unicode.
+ The content is examined and every attempt is made to properly decode it to
+ Unicode unless the nodecode property flag is set to True.
.. seealso::
:meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
- '''
+ """
- content = ''
- '''Unicode encoded string containing the body of the reposne.'''
+ # content = ''
+    """Unicode encoded string containing the body of the response."""
def __init__(self, response):
- '''
+ """
Args:
response (:class:`mimetools.Message`): The object returned by a call
to :func:`urllib2.urlopen`.
- '''
+ """
self._response = response
+ self._nodecode = False
@property
def content(self):
@@ -345,10 +358,13 @@ def content(self):
encoding = None
try:
if self._response.headers['content-encoding'].lower() == 'gzip':
- html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
+ html = gzip.GzipFile(fileobj=six.BytesIO(html)).read()
except:
pass
+ if self._nodecode:
+ return html
+
try:
content_type = self._response.headers['content-type']
if 'charset=' in content_type:
@@ -356,26 +372,46 @@ def content(self):
except:
pass
- r = re.search('.
+"""
+
+from six.moves import urllib_parse
+from resolveurl.plugins.lib import helpers
+from resolveurl import common
+from resolveurl.resolver import ResolveUrl, ResolverError
+
+
+class ABCVideoResolver(ResolveUrl):
+ name = "abcvideo"
+ domains = ['abcvideo.cc']
+ pattern = r'(?://|\.)(abcvideo\.cc)/(?:embed-)?([0-9a-zA-Z]+)'
+
+ def get_media_url(self, host, media_id):
+ surl = 'https://abcvideo.cc/dl'
+ domain = 'aHR0cHM6Ly9hYmN2aWRlby5jYzo0NDM.'
+ web_url = self.get_url(host, media_id)
+ rurl = 'https://{0}/'.format(host)
+ headers = {'User-Agent': common.FF_USER_AGENT,
+ 'Referer': rurl}
+ html = self.net.http_GET(web_url, headers).content
+ token = helpers.girc(html, rurl, domain)
+ if token:
+ data = {'op': 'video_src',
+ 'file_code': media_id,
+ 'g-recaptcha-response': token}
+ headers.update({'X-Requested-With': 'XMLHttpRequest'})
+ shtml = self.net.http_GET('{0}?{1}'.format(surl, urllib_parse.urlencode(data)), headers=headers).content
+ sources = helpers.scrape_sources(shtml)
+ if sources:
+ headers.pop('X-Requested-With')
+ return helpers.pick_source(sources) + helpers.append_headers(headers)
+
+ raise ResolverError('File Not Found or removed')
+
+ def get_url(self, host, media_id):
+ return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
diff --git a/lib/resolveurl/plugins/adultswim.py b/lib/resolveurl/plugins/adultswim.py
index 1778bfd4..162dd6c3 100644
--- a/lib/resolveurl/plugins/adultswim.py
+++ b/lib/resolveurl/plugins/adultswim.py
@@ -12,37 +12,36 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
-import re, json
-from lib import helpers
+import re
+import json
+from resolveurl.plugins.lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
+
class AdultSwimResolver(ResolveUrl):
name = "AdultSwim"
domains = ["adultswim.com"]
- pattern = "(?://|\.)(adultswim\.com)/videos/((?!streams)[a-z\-]+/[a-z\-]+)"
-
- def __init__(self):
- self.net = common.Net()
+ pattern = r"(?://|\.)(adultswim\.com)/videos/((?!streams)[a-z\-]+/[a-z\-]+)"
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
-
+
if html:
try:
- json_data = re.search("""__AS_INITIAL_DATA__\s*=\s*({.*?});""", html).groups()[0]
- json_data = json_data.replace("\/", "/")
+ json_data = re.search(r"""__AS_INITIAL_DATA__\s*=\s*({.*?});""", html).groups()[0]
+ json_data = json_data.replace(r"\/", "/")
a = json.loads(json_data)
ep_id = a["show"]["sluggedVideo"]["id"]
api_url = 'http://www.adultswim.com/videos/api/v0/assets?platform=desktop&id=%s&phds=true' % ep_id
-
- return helpers.get_media_url(api_url, patterns=["""[^"]+).+?>(?P[^<\s]+)"""], result_blacklist=[".f4m"]).replace(' ', '%20')
-
+
+ return helpers.get_media_url(api_url, patterns=[r"""[^"]+).+?>(?P[^<\s]+)"""], result_blacklist=[".f4m"]).replace(' ', '%20')
+
except Exception as e:
raise ResolverError(e)
-
+
raise ResolverError('Video not found')
def get_url(self, host, media_id):
diff --git a/lib/resolveurl/plugins/aliez.py b/lib/resolveurl/plugins/aliez.py
index 03830f35..f31d7ac5 100644
--- a/lib/resolveurl/plugins/aliez.py
+++ b/lib/resolveurl/plugins/aliez.py
@@ -2,7 +2,7 @@
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
- resolveurl XBMC Addon
+ Plugin for ResolveURL
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
@@ -19,16 +19,19 @@
along with this program. If not, see .
"""
import re
-from lib import helpers
-from __resolve_generic__ import ResolveGeneric
+from resolveurl.plugins.lib import helpers
+from resolveurl.plugins.__resolve_generic__ import ResolveGeneric
+
class AliezResolver(ResolveGeneric):
name = "aliez"
domains = ['aliez.me']
- pattern = '(?://|\.)(aliez\.me)/(?:(?:player/video\.php\?id=([0-9]+)&s=([A-Za-z0-9]+))|(?:video/([0-9]+)/([A-Za-z0-9]+)))'
-
+ pattern = r'(?://|\.)(aliez\.me)/(?:(?:player/video\.php\?id=([0-9]+)&s=([A-Za-z0-9]+))|(?:video/([0-9]+)/([A-Za-z0-9]+)))'
+
def get_media_url(self, host, media_id):
- return helpers.get_media_url(self.get_url(host, media_id), patterns=['''file:\s*['"](?P[^'"]+)''']).replace(' ', '%20')
+ return helpers.get_media_url(self.get_url(host, media_id),
+ patterns=[r'''file:\s*['"](?P[^'"]+)'''],
+ generic_patterns=False).replace(' ', '%20')
def get_host_and_id(self, url):
r = re.search(self.pattern, url, re.I)
@@ -41,4 +44,4 @@ def get_host_and_id(self, url):
def get_url(self, host, media_id):
media_id = media_id.split("|")
- return self._default_get_url(host, media_id, 'http://emb.%s/player/video.php?id=%s&s=%s&w=590&h=332' % (host, media_id[0], media_id[1]))
+ return self._default_get_url(host, media_id, 'http://emb.apl5.me/player/video.php?id=%s&s=%s&w=590&h=332' % (media_id[0], media_id[1]))
diff --git a/lib/resolveurl/plugins/alldebrid.py b/lib/resolveurl/plugins/alldebrid.py
index 675c951e..ba29e542 100644
--- a/lib/resolveurl/plugins/alldebrid.py
+++ b/lib/resolveurl/plugins/alldebrid.py
@@ -1,5 +1,7 @@
"""
- resolveurl Kodi Addon
+ Plugin for ResolveURL
+ v4 api updates
+ Copyright (c) 2020 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,9 +18,9 @@
"""
import re
-from urllib import quote_plus
-from urllib2 import HTTPError
+from six.moves import urllib_parse, urllib_error
import json
+from resolveurl.plugins.lib import helpers
from resolveurl import common
from resolveurl.common import i18n
from resolveurl.resolver import ResolveUrl, ResolverError
@@ -28,10 +30,10 @@
AGENT = 'ResolveURL for Kodi'
VERSION = common.addon_version
-USER_AGENT = '%s/%s' % (AGENT, VERSION)
+USER_AGENT = '{0}/{1}'.format(AGENT, VERSION)
FORMATS = common.VIDEO_FORMATS
-api_url = 'https://api.alldebrid.com'
+api_url = 'https://api.alldebrid.com/v4'
class AllDebridResolver(ResolveUrl):
@@ -49,9 +51,9 @@ def get_media_url(self, host, media_id, cached_only=False):
if media_id.lower().startswith('magnet:'):
r = re.search('''magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)''', media_id, re.I)
if r:
- _hash, _format = r.group(2), r.group(1)
+ _hash = r.group(2)
if self.__check_cache(_hash):
- logger.log_debug('AllDebrid: BTIH %s is readily available to stream' % _hash)
+ logger.log_debug('AllDebrid: BTIH {0} is readily available to stream'.format(_hash))
transfer_id = self.__create_transfer(_hash)
else:
if self.get_setting('cached_only') == 'true' or cached_only:
@@ -61,71 +63,83 @@ def get_media_url(self, host, media_id, cached_only=False):
self.__initiate_transfer(transfer_id)
transfer_info = self.__list_transfer(transfer_id)
- for _link, _file in transfer_info.get('links').items():
- if any(_file.lower().endswith(x) for x in FORMATS):
- media_id = _link.replace("\/", "/")
- break
-
+ sources = [(link.get('size'), link.get('link'))
+ for link in transfer_info.get('links')
+ if any(link.get('filename').lower().endswith(x) for x in FORMATS)]
+ media_id = max(sources)[1]
self.__delete_transfer(transfer_id)
- url = '%s/link/unlock?agent=%s&version=%s&token=%s&link=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id)
+ url = '{0}/link/unlock?agent={1}&apikey={2}&link={3}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), urllib_parse.quote_plus(media_id))
result = self.net.http_GET(url, headers=self.headers).content
- except HTTPError as e:
+ except urllib_error.HTTPError as e:
try:
js_result = json.loads(e.read())
if 'error' in js_result:
- msg = '%s (%s)' % (js_result.get('error'), js_result.get('errorCode'))
+ msg = '{0} ({1})'.format(js_result.get('error'), js_result.get('errorCode'))
else:
msg = 'Unknown Error (1)'
except:
msg = 'Unknown Error (2)'
- raise ResolverError('AllDebrid Error: %s (%s)' % (msg, e.code))
+ raise ResolverError('AllDebrid Error: {0} ({1})'.format(msg, e.code))
else:
js_result = json.loads(result)
- logger.log_debug('AllDebrid resolve: [%s]' % js_result)
+ logger.log_debug('AllDebrid resolve: [{0}]'.format(js_result))
if 'error' in js_result:
- raise ResolverError('AllDebrid Error: %s (%s)' % (js_result.get('error'), js_result.get('errorCode')))
- elif js_result.get('success', False):
- if js_result.get('infos').get('link'):
- return js_result.get('infos').get('link')
+ e = js_result.get('error')
+ raise ResolverError('AllDebrid Error: {0} ({1})'.format(e.get('message'), e.get('code')))
+ elif js_result.get('status', False) == "success":
+ if js_result.get('data').get('link'):
+ return js_result.get('data').get('link')
+ elif js_result.get('data').get('host') == "stream":
+ sources = js_result.get('data').get('streams')
+ fid = js_result.get('data').get('id')
+ sources = [(str(source.get("quality")), source.get("id")) for source in sources if '+' not in source.get("id")]
+ sid = helpers.pick_source(helpers.sort_sources_list(sources))
+ url = '{0}/link/streaming?agent={1}&apikey={2}&id={3}&stream={4}' \
+ .format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), fid, sid)
+ result = self.net.http_GET(url, headers=self.headers).content
+ js_data = json.loads(result)
+ if js_data.get('data').get('link'):
+ return js_data.get('data').get('link')
raise ResolverError('AllDebrid: no stream returned')
def __check_cache(self, media_id):
- try:
- url = '%s/magnet/instant?agent=%s&version=%s&token=%s&magnet=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id)
- result = self.net.http_GET(url, headers=self.headers).content
- result = json.loads(result)
- if result.get('success', False):
- response = result.get('instant', False)
- return response
- except:
- pass
+ url = '{0}/magnet/instant?agent={1}&apikey={2}&magnets[]={3}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), media_id.lower())
+ result = self.net.http_GET(url, headers=self.headers).content
+ result = json.loads(result)
+ if result.get('status') == "success":
+ magnets = result.get('data').get('magnets')
+ for magnet in magnets:
+ if media_id.lower() == magnet.get('magnet').lower() or media_id.lower() == magnet.get('hash').lower():
+ response = magnet.get('instant', False)
+ return response
return False
def __list_transfer(self, transfer_id):
- try:
- url = '%s/magnet/status?agent=%s&version=%s&token=%s&id=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), transfer_id)
- response = self.net.http_GET(url, headers=self.headers).content
- result = json.loads(response)
- if result.get('success', False):
- return result
- except:
- pass
+ url = '{0}/magnet/status?agent={1}&apikey={2}&id={3}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), transfer_id)
+ result = json.loads(self.net.http_GET(url, headers=self.headers).content)
+ if result.get('status', False) == "success":
+ magnets = result.get('data').get('magnets')
+            if isinstance(magnets, list):
+ for magnet in magnets:
+ if transfer_id == magnet.get('id'):
+ return magnet
+ else:
+ return magnets
return {}
def __create_transfer(self, media_id):
- try:
- url = '%s/magnet/upload?agent=%s&version=%s&token=%s&magnet=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id)
- response = self.net.http_GET(url, headers=self.headers).content
- result = json.loads(response)
- if result.get('success', False):
- logger.log_debug('Transfer successfully started to the AllDebrid cloud')
- return result.get('id', "")
- except:
- pass
+ url = '{0}/magnet/upload?agent={1}&apikey={2}&magnets[]={3}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), media_id)
+ result = json.loads(self.net.http_GET(url, headers=self.headers).content)
+ if result.get('status', False) == "success":
+ logger.log_debug('Transfer successfully started to the AllDebrid cloud')
+ magnets = result.get('data').get('magnets')
+ for magnet in magnets:
+ if media_id in magnet.get('magnet') or media_id.lower() == magnet.get('hash').lower():
+ return magnet.get('id')
return ""
@@ -136,20 +150,21 @@ def __initiate_transfer(self, transfer_id, interval=5):
line1 = transfer_info.get('filename')
line2 = 'Saving torrent to UptoBox via AllDebrid'
line3 = transfer_info.get('status')
- with common.kodi.ProgressDialog('Resolve URL AllDebrid Transfer', line1, line2, line3) as pd:
+ with common.kodi.ProgressDialog('ResolveURL AllDebrid Transfer', line1, line2, line3) as pd:
while not transfer_info.get('statusCode') == 4:
common.kodi.sleep(1000 * interval)
transfer_info = self.__list_transfer(transfer_id)
file_size = transfer_info.get('size')
+ file_size2 = round(float(file_size) / (1000 ** 3), 2)
line1 = transfer_info.get('filename')
if transfer_info.get('statusCode') == 1:
download_speed = round(float(transfer_info.get('downloadSpeed')) / (1000**2), 2)
progress = int(float(transfer_info.get('downloaded')) / file_size * 100) if file_size > 0 else 0
- line3 = "Downloading at %s MB/s from %s peers, %s%% of %sGB completed" % (download_speed, transfer_info.get('seeders'), progress, round(float(file_size) / (1000 ** 3), 2))
+ line3 = "Downloading at {0}MB/s from {1} peers, {2}% of {3}GB completed".format(download_speed, transfer_info.get('seeders'), progress, file_size2)
elif transfer_info.get('statusCode') == 3:
upload_speed = round(float(transfer_info.get('uploadSpeed')) / (1000 ** 2), 2)
progress = int(float(transfer_info.get('uploaded')) / file_size * 100) if file_size > 0 else 0
- line3 = "Uploading at %s MB/s, %s%% of %s GB completed" % (upload_speed, progress, round(float(file_size) / (1000 ** 3), 2))
+ line3 = "Uploading at {0}MB/s, {1}% of {2}GB completed".format(upload_speed, progress, file_size2)
else:
line3 = transfer_info.get('status')
progress = 0
@@ -157,12 +172,10 @@ def __initiate_transfer(self, transfer_id, interval=5):
pd.update(progress, line1=line1, line3=line3)
if pd.is_canceled():
self.__delete_transfer(transfer_id)
- # self.__delete_folder()
- raise ResolverError('Transfer ID %s :: Canceled by user' % transfer_id)
+ raise ResolverError('Transfer ID {0} :: Canceled by user'.format(transfer_id))
elif 5 <= transfer_info.get('statusCode') <= 10:
self.__delete_transfer(transfer_id)
- # self.__delete_folder()
- raise ResolverError('Transfer ID %s :: %s' % (transfer_id, transfer_info.get('status')))
+ raise ResolverError('Transfer ID {0} :: {1}'.format(transfer_id, transfer_info.get('status')))
common.kodi.sleep(1000 * interval) # allow api time to generate the links
@@ -170,16 +183,17 @@ def __initiate_transfer(self, transfer_id, interval=5):
except Exception as e:
self.__delete_transfer(transfer_id)
- raise ResolverError('Transfer ID %s :: %s' % (transfer_id, e))
+ raise ResolverError('Transfer ID {0} :: {1}'.format(transfer_id, e))
def __delete_transfer(self, transfer_id):
try:
- url = '%s/magnet/delete?agent=%s&version=%s&token=%s&id=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), transfer_id)
+ url = '{0}/magnet/delete?agent={1}&apikey={2}&id={3}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), transfer_id)
response = self.net.http_GET(url, headers=self.headers).content
result = json.loads(response)
- if result.get('success', False):
- logger.log_debug('Transfer ID "%s" deleted from the AllDebrid cloud' % transfer_id)
- return True
+ if result.get('status', False) == "success":
+ if 'deleted' in response.get('data').get('message'):
+ logger.log_debug('Transfer ID "{0}" deleted from the AllDebrid cloud'.format(transfer_id))
+ return True
except:
pass
@@ -194,41 +208,54 @@ def get_host_and_id(self, url):
@common.cache.cache_method(cache_limit=8)
def get_all_hosters(self):
hosters = []
- url = '%s/user/hosts?agent=%s&version=%s&token=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'))
+ url = '{0}/user/hosts?agent={1}&apikey={2}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'))
try:
- js_result = self.net.http_GET(url, headers=self.headers).content
- js_data = json.loads(js_result)
- if js_data.get('success', False):
- regexes = [value.get('regexp').replace('\/', '/') for key, value in js_data.get('hosts', {}).iteritems()
+ result = self.net.http_GET(url, headers=self.headers).content
+ js_data = json.loads(result)
+ if js_data.get('status', False) == "success":
+ js_data = js_data.get('data')
+ regexes = [value.get('regexp') for _, value in js_data.get('hosts', {}).items()
if value.get('status', False)]
- logger.log_debug('AllDebrid hosters : %s' % regexes)
hosters = [re.compile(regex) for regex in regexes]
+ logger.log_debug('AllDebrid hosters : {0}'.format(len(hosters)))
+ regexes = [value.get('regexp') for _, value in js_data.get('streams', {}).items()]
+ streamers = []
+ for regex in regexes:
+ try:
+ streamers.append(re.compile(regex))
+ except:
+ pass
+ logger.log_debug('AllDebrid Streamers : {0}'.format(len(streamers)))
+ hosters.extend(streamers)
+ logger.log_debug('AllDebrid Total hosters : {0}'.format(len(hosters)))
else:
logger.log_error('Error getting AD Hosters')
except Exception as e:
- logger.log_error('Error getting AD Hosters: %s' % e)
+ logger.log_error('Error getting AD Hosters: {0}'.format(e))
return hosters
@common.cache.cache_method(cache_limit=8)
def get_hosts(self):
hosts = []
- url = '%s/hosts/domains' % api_url
+ url = '{0}/hosts/domains?agent={1}&apikey={2}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'))
try:
js_result = self.net.http_GET(url, headers=self.headers).content
js_data = json.loads(js_result)
- if js_data.get('success', False):
- hosts = [host.replace('www.', '') for host in js_data.get('hosts', [])]
+ if js_data.get('status', False) == "success":
+ # hosts = [host.replace('www.', '') for host in js_data.get('hosts', [])]
+ hosts = js_data.get('data').get('hosts')
+ hosts.extend(js_data.get('data').get('streams'))
if self.get_setting('torrents') == 'true':
- hosts.extend([u'torrent', u'magnet'])
- logger.log_debug('AllDebrid hosts : %s' % hosts)
+ hosts.extend(['torrent', 'magnet'])
+ logger.log_debug('AllDebrid hosts : {0}'.format(hosts))
else:
- logger.log_error('Error getting AD Hosters')
+ logger.log_error('Error getting AD Hosts')
except Exception as e:
- logger.log_error('Error getting AD Hosts: %s' % e)
+ logger.log_error('Error getting AD Hosts: {0}'.format(e))
return hosts
def valid_url(self, url, host):
- logger.log_debug('in valid_url %s : %s' % (url, host))
+ logger.log_debug('in valid_url {0} : {1}'.format(url, host))
if url:
if url.lower().startswith('magnet:') and self.get_setting('torrents') == 'true':
return True
@@ -236,7 +263,6 @@ def valid_url(self, url, host):
self.hosters = self.get_all_hosters()
for regexp in self.hosters:
- # logger.log_debug('AllDebrid checking host : %s' %str(regexp))
if re.search(regexp, url):
logger.log_debug('AllDebrid Match found')
return True
@@ -258,53 +284,52 @@ def reset_authorization(self):
self.set_setting('token', '')
def authorize_resolver(self):
- url = '%s/pin/get?agent=%s&version=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION))
+ url = '{0}/pin/get?agent={1}'.format(api_url, urllib_parse.quote_plus(AGENT))
js_result = self.net.http_GET(url, headers=self.headers).content
- js_data = json.loads(js_result)
- line1 = 'Go to URL: %s' % (js_data.get('base_url').replace('\/', '/'))
- line2 = 'When prompted enter: %s' % (js_data.get('pin'))
- with common.kodi.CountdownDialog('Resolve Url All Debrid Authorization', line1, line2,
- countdown=js_data.get('expired_in', 120)) as cd:
- result = cd.start(self.__check_auth, [js_data.get('check_url').replace('\/', '/')])
+ js_data = json.loads(js_result).get('data')
+ line1 = 'Go to URL: {0}'.format(js_data.get('base_url'))
+ line2 = 'When prompted enter: {0}'.format(js_data.get('pin'))
+ with common.kodi.CountdownDialog('ResolveUrl AllDebrid Authorization', line1, line2,
+ countdown=js_data.get('expires_in', 120)) as cd:
+ result = cd.start(self.__check_auth, [js_data.get('check_url')])
# cancelled
if result is None:
return
- return self.__get_token(js_data.get('check_url').replace('\/', '/'))
+ return self.__get_token(js_data.get('check_url'))
def __get_token(self, url):
try:
- js_result = self.net.http_GET(url, headers=self.headers).content
- js_data = json.loads(js_result)
- if js_data.get("success", False):
- token = js_data.get('token', '')
- logger.log_debug('Authorizing All Debrid Result: |%s|' % token)
+ js_data = json.loads(self.net.http_GET(url, headers=self.headers).content)
+ if js_data.get("status", False) == "success":
+ js_data = js_data.get('data')
+ token = js_data.get('apikey', '')
+ logger.log_debug('Authorizing All Debrid Result: |{0}|'.format(token))
self.set_setting('token', token)
return True
except Exception as e:
- logger.log_debug('All Debrid Authorization Failed: %s' % e)
+ logger.log_debug('All Debrid Authorization Failed: {0}'.format(e))
return False
def __check_auth(self, url):
activated = False
try:
- js_result = self.net.http_GET(url, headers=self.headers).content
- js_data = json.loads(js_result)
- if js_data.get("success", False):
+ js_data = json.loads(self.net.http_GET(url, headers=self.headers).content)
+ if js_data.get("status", False) == "success":
+ js_data = js_data.get('data')
activated = js_data.get('activated', False)
except Exception as e:
- logger.log_debug('Exception during AD auth: %s' % e)
+ logger.log_debug('Exception during AD auth: {0}'.format(e))
return activated
@classmethod
def get_settings_xml(cls):
xml = super(cls, cls).get_settings_xml()
- # xml.append('' % (cls.__name__, i18n('auto_primary_link')))
- xml.append('' % (cls.__name__, i18n('torrents')))
- xml.append('' % (cls.__name__, i18n('cached_only')))
- xml.append('' % (cls.__name__, i18n('auth_my_account')))
- xml.append('' % (cls.__name__, i18n('reset_my_auth')))
- xml.append('' % cls.__name__)
+ xml.append(''.format(cls.__name__, i18n('torrents')))
+ xml.append(''.format(cls.__name__, i18n('cached_only')))
+ xml.append(''.format(cls.__name__, i18n('auth_my_account')))
+ xml.append(''.format(cls.__name__, i18n('reset_my_auth')))
+ xml.append(''.format(cls.__name__))
return xml
@classmethod
@@ -312,5 +337,5 @@ def _is_enabled(cls):
return cls.get_setting('enabled') == 'true' and cls.get_setting('token')
@classmethod
- def isUniversal(self):
+ def isUniversal(cls):
return True
diff --git a/lib/resolveurl/plugins/amazon.py b/lib/resolveurl/plugins/amazon.py
index 521c38a5..8cb53592 100644
--- a/lib/resolveurl/plugins/amazon.py
+++ b/lib/resolveurl/plugins/amazon.py
@@ -1,5 +1,5 @@
"""
- Kodi resolveurl plugin
+ Plugin for ResolveURL
Copyright (C) 2016 script.module.resolveurl
This program is free software: you can redistribute it and/or modify
@@ -15,24 +15,23 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
"""
-import time, json
-from lib import helpers
+import time
+import json
+from resolveurl.plugins.lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
+
class AmazonCloudResolver(ResolveUrl):
name = 'amazon_clouddrive'
domains = ['amazon.com']
- pattern = '(?://|\.)(amazon\.com)/clouddrive/share/([0-9a-zA-Z]+)'
-
- def __init__(self):
- self.net = common.Net()
-
+ pattern = r'(?://|\.)(amazon\.com)/clouddrive/share/([0-9a-zA-Z]+)'
+
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': 'https://www.amazon.com/'}
html = self.net.http_GET(web_url, headers=headers).content
-
+
if html:
try:
node_info = json.loads(html)
@@ -43,14 +42,14 @@ def get_media_url(self, host, media_id):
if html:
source_info = json.loads(html)
source = source_info["data"][0]["tempLink"]
-
+
if source:
source = "%s?download=true" % source
return source + helpers.append_headers(headers)
except:
raise ResolverError('Unable to locate video')
-
+
raise ResolverError('Unable to locate video')
-
+
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://www.{host}/drive/v1/shares/{media_id}?shareId={media_id}&resourceVersion=V2&ContentType=JSON&_=%s322' % time.time())
diff --git a/lib/resolveurl/plugins/irshare.py b/lib/resolveurl/plugins/anavids.py
similarity index 63%
rename from lib/resolveurl/plugins/irshare.py
rename to lib/resolveurl/plugins/anavids.py
index aa8977d8..c17dca2d 100644
--- a/lib/resolveurl/plugins/irshare.py
+++ b/lib/resolveurl/plugins/anavids.py
@@ -1,6 +1,6 @@
-'''
-Plugin for ResolveURL
-Copyright (C) 2018 gujal
+"""
+Plugin for ResolveUrl
+Copyright (C) 2020 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,38 +14,30 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see .
-'''
+"""
-import re
-from lib import helpers
-from lib import jsunpack
+from resolveurl.plugins.lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
-class IRShareResolver(ResolveUrl):
- name = "irshare"
- domains = ["irshare.net"]
- pattern = '(?://|\.)(irshare\.net)/embed/([0-9a-zA-Z]+)'
+
+class AnaVidsResolver(ResolveUrl):
+ name = "anavids.com"
+ domains = ['anavids.com']
+ pattern = r'(?://|\.)(anavids\.com)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
- headers = {'User-Agent': common.RAND_UA,
- 'Referer': web_url}
+ headers = {'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
-
- r = re.search('JuicyCodes\.Run\("([^)]+)"\)', html)
-
- if r:
- jc = r.group(1).replace('"+"', '').decode('base64')
- jc = jsunpack.unpack(jc)
- sources = helpers.scrape_sources(jc)
- headers.update({'Range': 'bytes=0-'})
+ sources = helpers.scrape_sources(html)
+ if sources:
+ headers.update({'verifypeer': 'false'})
return helpers.pick_source(sources) + helpers.append_headers(headers)
-
raise ResolverError('Video cannot be located.')
def get_url(self, host, media_id):
- return self._default_get_url(host, media_id, template='https://{host}/embed/{media_id}/')
+ return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
diff --git a/lib/resolveurl/plugins/anistream.py b/lib/resolveurl/plugins/anistream.py
index b9f66c75..ec75c987 100644
--- a/lib/resolveurl/plugins/anistream.py
+++ b/lib/resolveurl/plugins/anistream.py
@@ -1,23 +1,44 @@
"""
-ani-stream resolveurl plugin
-Copyright (C) 2016 quartoxuna
+ Plugin for ResolveUrl
+ Copyright (C) 2020 Anis
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
-You should have received a copy of the GNU General Public License
-along with this program. If not, see .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see .
"""
-from __resolve_generic__ import ResolveGeneric
-class AniStreamResolver(ResolveGeneric):
+from resolveurl.plugins.lib import helpers
+import re
+import base64
+from resolveurl import common
+from resolveurl.resolver import ResolveUrl, ResolverError
+
+
+class AniStreamResolver(ResolveUrl):
name = "ani-stream"
domains = ["ani-stream.com"]
- pattern = '(?://|\.)(ani-stream\.com)/(?:embed-)?([0-9a-zA-Z-]+)'
+ pattern = r'(?://|\.)(ani-stream\.com)/(?:embed-)?([0-9a-zA-Z-]+)'
+
+ def get_media_url(self, host, media_id):
+ web_url = self.get_url(host, media_id)
+ headers = {'User-Agent': common.FF_USER_AGENT}
+ html = self.net.http_GET(web_url, headers=headers).content
+ r = re.search(r'base64,([^"]+)', html)
+ if r:
+ html = base64.b64decode(r.group(1)).decode('utf-8')
+ sources = helpers.scrape_sources(html)
+ if sources:
+ return helpers.pick_source(helpers.sort_sources_list(sources)) + helpers.append_headers(headers)
+ raise ResolverError('Video Link Not Found')
+
+ def get_url(self, host, media_id):
+ return self._default_get_url(host, media_id, 'https://{host}/embed-{media_id}.html')
diff --git a/lib/resolveurl/plugins/anyfiles.py b/lib/resolveurl/plugins/anyfiles.py
deleted file mode 100644
index 613ea909..00000000
--- a/lib/resolveurl/plugins/anyfiles.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
- Copyright (C) 2014 smokdpi
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see .
-"""
-import re
-import urlparse
-from lib import helpers
-from resolveurl import common
-from resolveurl.resolver import ResolveUrl, ResolverError
-
-class AnyFilesResolver(ResolveUrl):
- name = "anyfiles"
- domains = ["anyfiles.pl"]
- pattern = '(?://|\.)(anyfiles\.pl)/.*?(?:id=|v=|/)([0-9]+)'
-
- def __init__(self):
- self.net = common.Net()
- self.user_agent = common.EDGE_USER_AGENT
- self.headers = {'User-Agent': self.user_agent}
-
- def get_media_url(self, host, media_id):
- web_url = self.get_url(host, media_id)
- hostname = urlparse.urlparse(web_url).hostname
- self.headers['Referer'] = web_url
- response = self.net.http_GET(web_url, headers=self.headers)
- response_headers = response.get_headers(as_dict=True)
- cookie = response_headers.get('Set-Cookie')
- if cookie:
- self.headers.update({'Cookie': cookie.split(';')[0]})
- html = response.content
- for match in re.finditer('''', html, re.DOTALL | re.I):
+ for match in re.finditer(r'(eval\s*\(function.*?)', html, re.DOTALL | re.I):
try:
js_data = jsunpack.unpack(match.group(1))
js_data = js_data.replace('\\', '')
packed_data += js_data
except:
pass
-
+
return packed_data
+
def sort_sources_list(sources):
if len(sources) > 1:
try:
- sources.sort(key=lambda x: int(re.sub("\D", "", x[0])), reverse=True)
+ sources.sort(key=lambda x: int(re.sub(r"\D", "", x[0])), reverse=True)
except:
- common.logger.log_debug('Scrape sources sort failed |int(re.sub("\D", "", x[0])|')
+ common.logger.log_debug(r'Scrape sources sort failed |int(re.sub("\D", "", x[0])|')
try:
sources.sort(key=lambda x: re.sub("[^a-zA-Z]", "", x[0].lower()))
except:
common.logger.log_debug('Scrape sources sort failed |re.sub("[^a-zA-Z]", "", x[0].lower())|')
return sources
+
def parse_sources_list(html):
sources = []
- match = re.search('''['"]?sources['"]?\s*:\s*\[(.*?)\]''', html, re.DOTALL)
- if match:
- sources = [(match[1], match[0].replace('\/', '/')) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', match.group(1), re.DOTALL)]
+ r = re.search(r'''['"]?sources['"]?\s*:\s*\[(.*?)\]''', html, re.DOTALL)
+ if r:
+ sources = [(match[1], match[0].replace(r'\/', '/')) for match in re.findall(r'''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', r.group(1), re.DOTALL)]
return sources
+
def parse_html5_source_list(html):
- label_attrib = 'type' if not re.search('''''', html) else 'data-res'
- sources = [(match[1], match[0].replace('\/', '/')) for match in re.findall('''''', html) else 'data-res'
+ sources = [(match[1], match[0].replace(r'\/', '/')) for match in re.findall(r''' 1) and (i.group(2) is not None):
label = i.group(2)
sources += [(label, '%s playpath=%s' % (base, i.group(1)))]
return sources
+
def scrape_sources(html, result_blacklist=None, scheme='http', patterns=None, generic_patterns=True):
- if patterns is None: patterns = []
-
+ if patterns is None:
+ patterns = []
+
def __parse_to_list(_html, regex):
_blacklist = ['.jpg', '.jpeg', '.gif', '.png', '.js', '.css', '.htm', '.html', '.php', '.srt', '.sub', '.xml', '.swf', '.vtt', '.mpd']
_blacklist = set(_blacklist + result_blacklist)
@@ -130,18 +143,19 @@ def __parse_to_list(_html, regex):
for r in re.finditer(regex, _html, re.DOTALL):
match = r.groupdict()
stream_url = match['url'].replace('&', '&')
- file_name = urlparse(stream_url[:-1]).path.split('/')[-1] if stream_url.endswith("/") else urlparse(stream_url).path.split('/')[-1]
- blocked = not file_name or any(item in file_name.lower() for item in _blacklist)
- if stream_url.startswith('//'): stream_url = scheme + ':' + stream_url
+ file_name = urllib_parse.urlparse(stream_url[:-1]).path.split('/')[-1] if stream_url.endswith("/") else urllib_parse.urlparse(stream_url).path.split('/')[-1]
+ label = match.get('label', file_name)
+ if label is None:
+ label = file_name
+ blocked = not file_name or any(item in file_name.lower() for item in _blacklist) or any(item in label for item in _blacklist)
+ if stream_url.startswith('//'):
+ stream_url = scheme + ':' + stream_url
if '://' not in stream_url or blocked or (stream_url in streams) or any(stream_url == t[1] for t in source_list):
continue
-
- label = match.get('label', file_name)
- if label is None: label = file_name
labels.append(label)
streams.append(stream_url)
-
- matches = zip(labels, streams)
+
+ matches = zip(labels, streams) if six.PY2 else list(zip(labels, streams))
if matches:
common.logger.log_debug('Scrape sources |%s| found |%s|' % (regex, matches))
return matches
@@ -150,32 +164,33 @@ def __parse_to_list(_html, regex):
result_blacklist = []
elif isinstance(result_blacklist, str):
result_blacklist = [result_blacklist]
-
- html = html.replace("\/", "/")
+
+ html = html.replace(r"\/", "/")
html += get_packed_data(html)
source_list = []
if generic_patterns or not patterns:
- source_list += __parse_to_list(html, '''["']?label\s*["']?\s*[:=]\s*["']?(?P