Merge pull request #5 from delirious-lettuce/fix_typos
Fix typos
1N3 authored Jan 9, 2018
2 parents 0f0b86d + 134b37a commit b3af99c
Showing 3 changed files with 43 additions and 43 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -47,7 +47,7 @@ docker run -it BlackWidow # Defaults to --help
This software is released under the GNU General Public License v3.0. See LICENSE.md for details.

## DONATIONS:
-Donations are welcome. This will help fascilitate improved features, frequent updates and better overall support.
+Donations are welcome. This will help facilitate improved features, frequent updates and better overall support.
- [+] BTC 1Fav36btfmdrYpCAR65XjKHhxuJJwFyKum
- [+] ETH 0x20bB09273702eaBDFbEE9809473Fd04b969a794d
- [+] LTC LQ6mPewec3xeLBYMdRP4yzeta6b9urqs2f
70 changes: 35 additions & 35 deletions blackwidow
Expand Up @@ -2,11 +2,11 @@
# blackwidow by 1N3
# https://crowdshield.com
#
-# ABOUT:
-# A python based web spider to gather all sub-domains, static and dynamic URL's, web forms, email addresses, phone numbers and more.
+# ABOUT:
+# A python based web spider to gather all sub-domains, static and dynamic URL's, web forms, email addresses, phone numbers and more.
#
# USAGE:
-# blackwidow -u https://target.com -l 2 <- spider https://target.com with 2 levels of depth (Default depth is 3).
+# blackwidow -u https://target.com -l 2 <- spider https://target.com with 2 levels of depth (Default depth is 3).
# blackwidow -d target.com -l 5 -s y <- spider domain: target.com with 5 levels of depth and fuzz all dynamic URL's.
#

@@ -32,25 +32,25 @@ def readlinks (url):
except Exception as ex:
print(ex)

-urls = open("/tmp/" + domain + "-urls.txt","w+")
-urls_saved = open(save_dir + domain + "-urls.txt","a")
+urls = open("/tmp/" + domain + "-urls.txt","w+")
+urls_saved = open(save_dir + domain + "-urls.txt","a")
forms_saved = open(save_dir + domain + "-forms.txt","a")
-dynamic_saved = open(save_dir + domain + "-dynamic.txt","a")
+dynamic_saved = open(save_dir + domain + "-dynamic.txt","a")
emails_saved = open(save_dir + domain + "-emails.txt","a")
phones_saved = open(save_dir + domain + "-phones.txt","a")
subdomains_saved = open(save_dir + domain + "-subdomains.txt","a")

print ""
print OKGREEN + "==================================================================================================" + RESET
-print OKGREEN + url
+print OKGREEN + url
print OKGREEN + "==================================================================================================" + RESET
for form in soup.find_all('form'):
print OKBLUE + "[+] Extracting form values..."
print OKBLUE + "[+] Extracting form values..."
print "__________________________________________________________________________________________________" + OKORANGE
print form
print OKBLUE + "__________________________________________________________________________________________________"
print RESET
-forms_saved.write(url + "\n")
+forms_saved.write(url + "\n")

# PARSE LINKS
for link in soup.find_all('a'):
@@ -59,85 +59,85 @@ def readlinks (url):
parsed_uri = urlparse(link.get('href'))
linkdomain = '{uri.netloc}'.format(uri=parsed_uri)
if (domain != linkdomain) and (linkdomain != "") and (domain in linkdomain):
print COLOR1 + "[+] Sub-domain found! " + linkdomain + " " + RESET
print COLOR1 + "[+] Sub-domain found! " + linkdomain + " " + RESET
subdomains_saved.write(linkdomain + "\n")
# IF LINK STARTS WITH HTTP
if link.get('href')[:4] == "http":
# SAME ORIGIN
if domain in link.get('href'):
# IF URL IS DYNAMIC
if "?" in link.get('href'):
print OKRED + "[+] Dynamic URL found! " + link.get('href') + " " + RESET
urls.write(link.get('href') + "\n")
urls_saved.write(link.get('href') + "\n")
dynamic_saved.write(link.get('href') + "\n")
print OKRED + "[+] Dynamic URL found! " + link.get('href') + " " + RESET
urls.write(link.get('href') + "\n")
urls_saved.write(link.get('href') + "\n")
dynamic_saved.write(link.get('href') + "\n")
# DOM BASED LINK
elif link.get('href')[:1] == "#":
print OKBLUE + "[i] DOM based link found! " + link.get('href') + " " + RESET
print OKBLUE + "[i] DOM based link found! " + link.get('href') + " " + RESET
# TELEPHONE
elif link.get('href')[:4] == "tel:":
s = link.get('href')
phonenum = s.split(':')[1]
print OKORANGE + "[i] Telephone # found! " + phonenum + " " + RESET
phones_saved.write(phonenum + "\n")
print OKORANGE + "[i] Telephone # found! " + phonenum + " " + RESET
phones_saved.write(phonenum + "\n")
# EMAIL
elif link.get('href')[:7] == "mailto:":
s = link.get('href')
email = s.split(':')[1]
print OKORANGE + "[i] Email found! " + email + " " + RESET
print OKORANGE + "[i] Email found! " + email + " " + RESET
emails_saved.write(email + "\n")
# FULL URI OF SAME ORIGIN
else:
print link.get('href')
-urls.write(link.get('href') + "\n")
-urls_saved.write(link.get('href') + "\n")
+urls.write(link.get('href') + "\n")
+urls_saved.write(link.get('href') + "\n")
# EXTERNAL LINK FOUND
else:
# IF URL IS DYNAMIC
if "?" in link.get('href'):
print COLOR2 + "[+] External Dynamic URL found! " + link.get('href') + " " + RESET
print COLOR2 + "[+] External Dynamic URL found! " + link.get('href') + " " + RESET
# DOM BASED LINK
elif link.get('href')[:1] == "#":
print COLOR2 + "[i] External DOM based link found! " + link.get('href') + " " + RESET
print COLOR2 + "[i] External DOM based link found! " + link.get('href') + " " + RESET
# TELEPHONE
elif link.get('href')[:4] == "tel:":
s = link.get('href')
phonenum = s.split(':')[1]
print OKORANGE + "[i] External Telephone # found! " + phonenum + " " + RESET
print OKORANGE + "[i] External Telephone # found! " + phonenum + " " + RESET
# EMAIL
elif link.get('href')[:7] == "mailto:":
s = link.get('href')
email = s.split(':')[1]
print OKORANGE + "[i] External Email found! " + email + " " + RESET
print OKORANGE + "[i] External Email found! " + email + " " + RESET
# FULL URI OF EXTERNAL ORIGIN
else:
print COLOR2 + "[i] External link found! " + link.get('href') + " " + RESET
# IF URL IS DYNAMIC
elif "?" in link.get('href'):
print OKRED + "[+] Dynamic URL found! " + url + link.get('href') + " " + RESET
urls.write(url + "/" + link.get('href') + "\n")
urls_saved.write(url + "/" + link.get('href') + "\n")
print OKRED + "[+] Dynamic URL found! " + url + link.get('href') + " " + RESET
urls.write(url + "/" + link.get('href') + "\n")
urls_saved.write(url + "/" + link.get('href') + "\n")
dynamic_saved.write(url + "/" + link.get('href') + "\n")
# DOM BASED LINK
elif link.get('href')[:1] == "#":
print OKBLUE + "[i] DOM based link found! " + link.get('href') + " " + RESET
print OKBLUE + "[i] DOM based link found! " + link.get('href') + " " + RESET
# TELEPHONE
elif link.get('href')[:4] == "tel:":
s = link.get('href')
phonenum = s.split(':')[1]
print OKORANGE + "[i] Telephone # found! " + phonenum + " " + RESET
print OKORANGE + "[i] Telephone # found! " + phonenum + " " + RESET
phones_saved.write(phonenum + "\n")
# EMAIL
elif link.get('href')[:7] == "mailto:":
s = link.get('href')
email = s.split(':')[1]
print OKORANGE + "[i] Email found! " + email + " " + RESET
print OKORANGE + "[i] Email found! " + email + " " + RESET
emails_saved.write(email + "\n")
# ELSE NORMAL LINK FOUND
else:
print url + "/" + link.get('href')
urls.write(url + "/" + link.get('href') + "\n")
urls_saved.write(url + "/" + link.get('href') + "\n")
urls.write(url + "/" + link.get('href') + "\n")
urls_saved.write(url + "/" + link.get('href') + "\n")
print OKGREEN + "__________________________________________________________________________________________________" + RESET

def readfile():
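Every branch in the readlinks hunks above follows one pattern: dispatch on the href's prefix (http, #, tel:, mailto:, or a relative path) and log the link to the matching output file. A minimal Python 3 sketch of that dispatch, with illustrative names (`classify_href` is not part of this codebase, and the real code records a subdomain and keeps going rather than returning early):

```python
# Sketch of the href dispatch used throughout readlinks (illustrative only).
from urllib.parse import urlparse

def classify_href(href, domain):
    # Mirrors: domain != linkdomain and linkdomain != "" and domain in linkdomain
    linkdomain = urlparse(href).netloc
    if domain != linkdomain and linkdomain and domain in linkdomain:
        return "subdomain"              # e.g. blog.target.com under target.com
    if href.startswith("http"):
        if domain in href:              # same origin
            return "dynamic" if "?" in href else "same-origin"
        return "external-dynamic" if "?" in href else "external"
    if href.startswith("#"):
        return "dom-based"
    if href.startswith(("tel:", "mailto:")):
        return href.split(":", 1)[0]    # "tel" or "mailto"
    return "relative-dynamic" if "?" in href else "relative"
```

For example, `classify_href("mailto:admin@target.com", "target.com")` yields `mailto`.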
@@ -164,7 +164,7 @@ def logo():
print OKRED + " 1N3 / /` '' `\ \ "
print OKRED + " | |"
print OKRED + " \ /"
print OKRED + ""
print OKRED + ""
print RESET
print OKORANGE + " + -- --=[https://crowdshield.com" + RESET
print OKORANGE + " + -- --=[blackwidow v" + version + RESET
@@ -175,7 +175,7 @@ def donations():
print COLOR1 + " HACK THE PLANET!!!!!"
print "**************************************************************************************************"
print "If you haven't already, please donate to this project using the addresses below."
print "This will help fascilitate improved features and ongoing support."
print "This will help facilitate improved features and ongoing support."
print ""
print "[+] BTC 1Fav36btfmdrYpCAR65XjKHhxuJJwFyKum"
print "[+] ETH 0x20bB09273702eaBDFbEE9809473Fd04b969a794d"
@@ -306,7 +306,7 @@ else:
urls = open(urls_file,"w+")
urls.close()
urls_saved = open(urls_saved_file,"w+")
-urls_saved.close()
+urls_saved.close()
forms_saved = open(forms_saved_file,"w+")
forms_saved.close()
subdomains = open(subdomain_file,"w+")
14 changes: 7 additions & 7 deletions injectx.py
Expand Up @@ -19,7 +19,7 @@ def logo():
print OKORANGE + ' /___/_/ /_/_/ /\___/\___/\__/_/|_| ' + RESET
print OKORANGE + ' /_____/ ' + RESET
print ''
-print OKBLUE + '--== Inject-X Fuzzer by 1N3@CrowdShield ==-- ' + RESET
+print OKBLUE + '--== Inject-X Fuzzer by 1N3@CrowdShield ==-- ' + RESET
print OKBLUE + ' --== https://crowdshield.com ==-- ' + RESET
print ''

@@ -120,7 +120,7 @@ def active_scan():
xss_url2 = new_url.replace("INJECTX", payload_exploit2)
print OKRED + "[+] XSS Found! ", str(payload_exploit2) + RESET
print OKRED + "[+] Vulnerable URL: " + xss_url2 + RESET
print OKGREEN + "[c] Exloit Command: firefox '" + xss_url2 + "' & "
print OKGREEN + "[c] Exploit Command: firefox '" + xss_url2 + "' & "
#os.system("curl -s '" + xss_url2 + "' | egrep alert\(1\) --color=auto")
#os.system("firefox '" + xss_url2 + "' > /dev/null 2> /dev/null")
else:
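The commented-out curl/egrep lines above hint at how an XSS hit would be verified: put the payload where the INJECTX marker sits and check whether it comes back unencoded. A naive sketch of that check, assuming the requests library (`reflects_payload` is an illustrative name, not part of injectx.py):

```python
# Naive reflected-XSS probe: if the exact payload string appears unescaped in
# the response body, the parameter is worth a closer look. Illustrative only.
import requests

def reflects_payload(url_template, payload='"><script>alert(1)</script>'):
    fuzzed = url_template.replace("INJECTX", payload)
    r = requests.get(fuzzed, timeout=10)
    return payload in r.text  # servers that HTML-encode output will fail this
```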
@@ -391,7 +391,7 @@ def active_scan():
idor_length_list.append(http_length)
http_length_diff = str(http_length_base - http_length)
print COLOR2 + "[i] New URL: " + idor_url + " [" + OKRED + str(http_status) + COLOR2 + COLOR2 + "]" + " [" + COLOR3 + str(http_length) + COLOR2 + "]" + " [" + COLOR1 + http_length_diff + COLOR2 + "]" + RESET

if (idor_length_list[0] != idor_length_list[1]) or (idor_status_list[1] != idor_status_list[2]) or (idor_length_list[0] != idor_length_list[2]):
print OKRED + "[+] Possible IDOR Found! " + RESET
print OKRED + "[+] Vulnerable URL: " + idor_url + RESET
@@ -597,10 +597,10 @@ def active_scan():
print COLOR3 + "======================================================================================================" + RESET

if str(http_status_base) == "404":
print COLOR1 + "[F] Recieved HTTP Status 404 - Page Not Found. Skipping..." + RESET
print COLOR1 + "[F] Received HTTP Status 404 - Page Not Found. Skipping..." + RESET

elif str(http_status_base) == "403":
print COLOR1 + "[F] Recieved HTTP Status 403 - Page Not Found. Skipping..." + RESET
print COLOR1 + "[F] Received HTTP Status 403 - Page Not Found. Skipping..." + RESET

else:
if "=" in full_url:
@@ -614,15 +614,15 @@ def active_scan():
param_list.extend([str(x + "=")])
param_vals.extend([str(urllib.quote_plus(y))])
param_length = param_length + 1

# FIND BASE URL
dynamic_url = full_url.find("?")
base_url = str(full_url[:dynamic_url + 1])

# LIST EACH PARAMETER
active_fuzz = 1
i = 1

while i <= param_length and active_fuzz <= param_length:
# DETERMINE FUZZ PARAMETER SELECTED
# IF CURRENT POSITION IS THE ACTIVE FUZZ POSITION
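The hunk above splits a dynamic URL into its base and an ordered list of parameter names and values so that one position at a time can carry the payload. The same step expressed with Python 3's urllib.parse, as a sketch (`inject_at` is an illustrative helper, not from injectx.py):

```python
# Split a dynamic URL into (name, value) pairs and drop a payload into one
# position, re-encoding the rest: the core of the per-parameter fuzz loop.
from urllib.parse import urlsplit, parse_qsl, urlencode, urlunsplit

def inject_at(full_url, position, payload):
    parts = urlsplit(full_url)
    params = parse_qsl(parts.query, keep_blank_values=True)
    fuzzed = [(k, payload if i == position else v)
              for i, (k, v) in enumerate(params)]
    return urlunsplit(parts._replace(query=urlencode(fuzzed)))
```

For example, `inject_at("https://target.com/p?a=1&b=2", 1, "INJECTX")` returns `https://target.com/p?a=1&b=INJECTX`.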
