Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
thisisshubhamkumar authored Aug 16, 2023
1 parent 455cf72 commit 1eb698c
Show file tree
Hide file tree
Showing 13 changed files with 1,733 additions and 0 deletions.
98 changes: 98 additions & 0 deletions buildwith.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
import requests
import builtwith
from bs4 import BeautifulSoup
import re

# ANSI SGR escape sequences for coloured CLI output.
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # reset (default colour)
Y = '\033[33m' # yellow

def analyze_website(url, timeout=10):
    """Fetch *url* and report detected languages, technologies, JS libraries,
    and the web server header.

    Always returns a 4-tuple ``(programming_languages, technologies,
    javascript_libraries, web_server)``.  On any failure every slot is
    ``None`` and the error is printed instead of being returned.  (The
    original returned a 5-tuple on error, which crashed the 4-value
    unpack in ``__main__`` with a ValueError.)
    """
    try:
        response = requests.get(url, timeout=timeout)
        html_content = response.text

        # Heuristic language detection from the raw HTML.
        programming_languages = detect_programming_language(html_content)

        # builtwith performs its own request and fingerprints the stack.
        technologies = builtwith.builtwith(url)

        # Pull <script src=...> library names out of the markup.
        javascript_libraries = extract_javascript_libraries(html_content)

        # The Server header is optional; default to "Unknown".
        web_server = response.headers.get("Server", "Unknown")

        return programming_languages, technologies, javascript_libraries, web_server
    except requests.Timeout:
        print(f"{R}[-] Request timed out{W}")
        return None, None, None, None
    except Exception as e:  # best-effort CLI tool: report and keep going
        print(f"{R}[-] {e}{W}")
        return None, None, None, None


# Rest of the code remains unchanged


def detect_programming_language(content):
    """Scan *content* (HTML/text) for telltale substrings of common
    server- and client-side languages.

    Matching is case-insensitive.  Returns the matched language names in
    the fixed order of the signature table below.
    """
    # Signature regex per language; raw strings keep the escapes literal.
    signatures = {
        "PHP": r"<\?php|\.php",
        "Python": r"python",
        "Ruby": r"ruby",
        "Java": r"\bjava\b",
        "JavaScript": r"javascript",
        "ASP.NET": r"asp\.net",
    }

    return [
        name
        for name, signature in signatures.items()
        if re.search(signature, content, re.IGNORECASE)
    ]


def extract_javascript_libraries(content):
    """Collect JavaScript library names from ``<script src=...>`` tags.

    Parses *content* with BeautifulSoup, then strips each ``src`` down to
    its final path component without the ``.js`` / ``.min.js`` suffix.

    Returns a de-duplicated list of names (order unspecified — set-backed).
    """
    soup = BeautifulSoup(content, "html.parser")

    libraries = set()
    for script in soup.find_all("script"):
        src = script.get("src")
        if not src:
            continue
        # [^/]+? limits the capture to the last path segment; the original
        # (.*?) started at the first "/" and swallowed every parent
        # directory too (e.g. "/ajax/libs/jquery" instead of "jquery").
        match = re.search(r"/([^/]+?)(?:\.min)?\.js$", src)
        if match:
            libraries.add(match.group(1))

    return list(libraries)


if __name__ == "__main__":
    website_url = input("Enter the website URL: ")

    # analyze_website historically returned a 5-tuple (..., error_message)
    # on failure; slice defensively so either shape unpacks cleanly
    # instead of raising ValueError on the error path.
    result = analyze_website(website_url)
    programming_languages, technologies, javascript_libraries, web_server = result[:4]
    if len(result) > 4 and result[4]:
        print(f"{R}[-] Error:{W} {result[4]}")

    if programming_languages:
        print("Detected programming languages:", ", ".join(programming_languages))
    else:
        print("No programming language detected or an error occurred.")

    if technologies:
        print("\nWebsite technologies:")
        for tech, details in technologies.items():
            print(f"{tech}: {details}")
    else:
        print("An error occurred while fetching technologies.")

    if javascript_libraries:
        print("\nJavaScript libraries:")
        for library in javascript_libraries:
            print("- " + library)
    else:
        print("No JavaScript libraries detected.")

    print("\nWeb server:", web_server)
43 changes: 43 additions & 0 deletions dmarc_record.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import dns.resolver
import re

# ANSI SGR escape sequences for coloured CLI output.
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # reset (default colour)
Y = '\033[33m' # yellow

def fetch_dmarc_links(domain):
    """Query ``_dmarc.<domain>`` TXT records and return any http(s) links
    embedded in them (e.g. ``rua``/``ruf`` report URIs).

    Returns a list of link strings; an empty list when there is no DMARC
    record or the lookup fails (the error is printed, not raised).
    """
    try:
        print(f'\n{Y}[!] DMARC record :{W}\n')
        # Bound the whole lookup so an unresponsive resolver cannot hang.
        timeout = 10

        # Query DMARC record for the domain
        query_result = dns.resolver.resolve('_dmarc.' + domain, 'TXT', lifetime=timeout)

        # Scan every TXT answer, not just rrset[0]: a host may publish
        # several records at _dmarc and links could live in any of them.
        link_pattern = r'https?://[^\s/$.?#].[^\s]*'
        links = []
        for record in query_result.rrset:
            links.extend(re.findall(link_pattern, record.to_text()))

        return links
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        print(f"{G}[+] {R}No DMARC record found for {domain}")
        return []
    except dns.exception.DNSException as e:
        print(f"{G}[+] {R}An error occurred: {e}")
        return []

if __name__ == "__main__":
    # Prompt for a domain, run the lookup, then report what was found.
    target = input("Enter the domain to check DMARC record and fetch links: ")
    found_links = fetch_dmarc_links(target)

    if not found_links:
        print("No links found in DMARC record.")
    else:
        print("Links found in DMARC record:")
        for found_link in found_links:
            print(found_link)
51 changes: 51 additions & 0 deletions dns_enumeration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import dns.resolver

# ANSI SGR escape sequences for coloured CLI output.
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # reset (default colour)
Y = '\033[33m' # yellow

def dnsrec(domain):
    """Enumerate common DNS record types plus the DMARC TXT record for
    *domain*, printing each answer as it is found.

    Returns a dict with keys ``dns`` and ``dmarc`` (lists of
    ``"TYPE: value"`` strings) and ``exported`` (True when anything was
    found).
    """
    result = {'dns': [], 'dmarc': []}
    print(f'\n{Y}[!] Starting DNS Enumeration...{W}\n')

    # Query via Google public DNS with a bounded per-query time.
    timeout = 10
    resolver = dns.resolver.Resolver()
    resolver.nameservers = ['8.8.8.8']
    resolver.timeout = timeout
    resolver.lifetime = timeout

    # Lookup failures that simply mean "no data for this name/type".
    skippable = (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout)

    for rtype in ('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'TXT'):
        try:
            answers = resolver.resolve(domain, rtype)
        except skippable:
            continue
        for rdata in answers:
            print(f'{G}[+] {C}{rtype}:{W} {rdata}')
            result['dns'].append(f'{rtype}: {rdata}')

    try:
        dmarc_answers = resolver.resolve(f'_dmarc.{domain}', 'TXT')
    except skippable:
        pass
    else:
        for rdata in dmarc_answers:
            print(f'{G}[+] {C}DMARC:{W} {rdata}')
            result['dmarc'].append(f'DMARC: {rdata}')

    result['exported'] = bool(result['dns'] or result['dmarc'])
    if not result['exported']:
        print(f'\n{R}[-] {C}No DNS Records or DMARC Record Found!{W}')

    return result

if __name__ == "__main__":
    # Prompt for a domain and dump the enumeration result dict.
    domain = input("Enter the domain to perform DNS enumeration: ")
    print(dnsrec(domain))
28 changes: 28 additions & 0 deletions header.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import requests

# ANSI SGR escape sequences for coloured CLI output.
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # reset (default colour)
Y = '\033[33m' # yellow


def fetch_headers(url):
    """GET *url* (with TLS certificate verification) and print/collect its
    response headers.

    Returns a dict of header name -> value, plus:
      * ``'Exception'`` — the error string when the request failed;
      * ``'exported'``  — True only when headers were actually fetched.
        (The original set it from ``bool(result)``, which was True even
        on failure because the ``'Exception'`` entry made the dict
        non-empty.)
    """
    result = {}
    fetched = False
    try:
        # verify=True enforces certificate validation; 10 s guards against hangs.
        response = requests.get(url, verify=True, timeout=10)
        print(f'\n{Y}[!] Headers :{W}\n')
        for key, value in response.headers.items():
            result[key] = value
            print(f'{G}[+] {C}{key}:{W} {value}')
        fetched = True
    except requests.exceptions.RequestException as e:
        print(f'\n{R}[-] {C}Exception :{W} {e}\n')
        result['Exception'] = str(e)
    result['exported'] = fetched
    return result

if __name__ == "__main__":
    # Prompt for a URL and dump the collected header dict.
    url_to_probe = input("Enter the URL to fetch headers from: ")
    print(fetch_headers(url_to_probe))
Loading

0 comments on commit 1eb698c

Please sign in to comment.