# Reversed by CrackerMain.Net reversers iseeu && Example
import requests
import uuid
import os
from bs4 import BeautifulSoup
import threading
from tqdm import tqdm
import time
from colorama import Fore, Style, init
init(autoreset=True)
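
# Google-dork parser: pulls free HTTP proxies from Geonode and ProxyScrape,
# runs each dork from dorks.txt as a Google search through those proxies,
# writes matching result URLs to results.txt, and records finished dorks in
# processed_dorks.txt so an interrupted run can resume where it stopped.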
PREMIUM_GROUP_IDS = {'12', '101', '94', '4', '96', '93', '100', '3', '6', '92',
                     '11', '99', '97'}
RED = Fore.RED + Style.BRIGHT
YELLOW = Fore.YELLOW + Style.BRIGHT
GREY = '\033[38;5;246m'
engadget_domains = ['apple.com', 'google.com', 'facebook.com', 'twitter.com',
                    'linkedin.com', 'youtube.com', 'instagram.com', 'wikipedia.org', 'reddit.com',
                    'icloud.com', 'amazon.com', 'microsoft.com', 'netflix.com', 'pinterest.com',
                    'tumblr.com', 'quora.com', 'ebay.com', 'flickr.com', 'bbc.co.uk', 'cnn.com',
                    'nytimes.com', 'forbes.com', 'huffpost.com', 'buzzfeed.com', 'stackoverflow.com',
                    'github.com', 'whatsapp.com', 'vk.com', 't.me', 'yahoo.com', 'hotmail.com',
                    'outlook.com', 'paypal.com', 'dropbox.com', 'salesforce.com', 'shopify.com',
                    'wordpress.com', 'medium.com', 'craigslist.org', 'bloomberg.com',
                    'businessinsider.com', 'theverge.com', 'vox.com', 'cnet.com', 'techcrunch.com',
                    'wired.com', 'gizmodo.com', 'lifehacker.com', 'engadget.com', 'arstechnica.com',
                    'reuters.com']
def get_hwid():
    return str(uuid.getnode())
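# NOTE: PREMIUM_GROUP_IDS, get_hwid(), RED, and engadget_domains are never
# referenced below; they appear to be leftovers from the original build
# (e.g. a premium/HWID license check and a domain filter).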
def get_proxies_from_geonode():
    url = ('https://proxylist.geonode.com/api/proxy-list'
           '?limit=500&page=1&sort_by=lastChecked&sort_type=desc'
           '&speed=slow&protocols=http')
    try:
        response = requests.get(url)
        response.raise_for_status()
        data = response.json()
        proxies = []
        for proxy in data['data']:
            ip = proxy['ip']
            port = proxy['port']
            if 'http' in proxy['protocols']:
                proxy_address = f'http://{ip}:{port}'
                proxies.append(proxy_address)
        return proxies
    except requests.exceptions.RequestException as e:
        print(f'Error fetching proxies from Geonode: {e}')
        return []
def get_proxies_from_proxyscrape():
    url = ('https://api.proxyscrape.com/v4/free-proxy-list/get'
           '?request=display_proxies&proxy_format=ipport&format=text&timeout=10000')
    try:
        response = requests.get(url)
        response.raise_for_status()
        proxies = response.text.splitlines()
        return proxies
    except requests.exceptions.RequestException as e:
        print(f'Error fetching proxies from ProxyScrape: {e}')
        return []
def get_all_proxies():
    proxies_from_geonode = get_proxies_from_geonode()
    proxies_from_proxyscrape = get_proxies_from_proxyscrape()
    return proxies_from_geonode + proxies_from_proxyscrape
def load_dorks(file_path):
    with open(file_path, 'r') as file:
        return [line.strip() for line in file]
def process_dork(dork, output_file, proxy, mode, failed_dorks, progress_bar,
                 retries_counter):
    try:
        # params={} lets requests URL-encode the dork (spaces, '&', '#', etc.),
        # which a raw f-string query would mangle.
        response = requests.get('https://www.google.com/search',
                                params={'q': dork},
                                proxies={'http': proxy, 'https': proxy},
                                timeout=10)
        soup = BeautifulSoup(response.text, 'html.parser')
        results = soup.find_all('a')
        success = False
        with open(output_file, 'a') as f:
            for link in results:
                href = link.get('href')
                if href and href.startswith('/url?q='):
                    clean_link = href.split('/url?q=')[1].split('&')[0]
                    # domain is derived in the original but never used.
                    domain = clean_link.split('/')[2] if '://' in clean_link else clean_link.split('/')[0]
                    if mode == 'Full URL Mode':
                        f.write(clean_link + '\n')
                    elif mode == 'Only Parameter Mode' and '?' in clean_link:
                        f.write(clean_link + '\n')
                    success = True
        if success:
            with open('processed_dorks.txt', 'a') as processed_file:
                processed_file.write(dork + '\n')
        else:
            # Treat an empty or blocked page as a failure so the dork is retried.
            raise Exception('No results')
        progress_bar.update(1)
    except Exception:
        failed_dorks.append(dork)
        retries_counter[0] += 1
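# NOTE: the parsing above relies on Google wrapping organic results in
# '/url?q=<target>&...' redirect links; Google changes this markup and
# aggressively CAPTCHAs automated queries, so expect many retries.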
def start_threads(dorks, num_threads, output_file, proxies, mode, progress_bar,
                  retries_counter):
    threads = []
    failed_dorks = []
    for dork in dorks:
        proxy = proxies[len(threads) % len(proxies)]
        thread = threading.Thread(target=process_dork,
                                  args=(dork, output_file, proxy, mode,
                                        failed_dorks, progress_bar,
                                        retries_counter))
        threads.append(thread)
        thread.start()
        # Run the dorks in batches of num_threads.
        if len(threads) >= num_threads:
            for thread in threads:
                thread.join()
            threads = []
    # Join any leftover threads from a final, partial batch before retrying,
    # so failed_dorks is complete when it is checked.
    for thread in threads:
        thread.join()
    if failed_dorks:
        start_threads(failed_dorks, num_threads, output_file, proxies, mode,
                      progress_bar, retries_counter)
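# NOTE: failed dorks are retried recursively with no cap, so a dork that never
# yields results (e.g. every proxy is blocked) keeps the run alive forever.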
def main():
    dork_file = 'dorks.txt'
    output_file = 'results.txt'
    processed_dorks_file = 'processed_dorks.txt'
    try:
        with open(processed_dorks_file, 'r') as f:
            processed_dorks = set(f.read().splitlines())
    except FileNotFoundError:
        processed_dorks = set()
    # Skip dorks that already succeeded in a previous run.
    dorks = [dork for dork in load_dorks(dork_file) if dork not in processed_dorks]
    if not dorks:
        print('No dorks to process. Exiting.')
        return
    proxies = get_all_proxies()
    if not proxies:
        print('No proxies found. Exiting.')
        return
    print(Fore.YELLOW + 'Reversed by CrackerMain.Net reversers iseeu && Example')
    print()
    print(Fore.RED + 'Note: Please make sure that the parser and the dorks.txt file match!')
    print(Fore.RED + 'The program is in beta; there may be errors!')
    mode = input('Select a mode (1 - Full URL Mode, 2 - Only Parameter Mode): ')
    mode = 'Full URL Mode' if mode == '1' else 'Only Parameter Mode'
    num_threads = int(input('How many threads to use? '))
    retries_counter = [0]
    progress_bar = tqdm(total=len(dorks), desc=GREY + 'Processing',
                        dynamic_ncols=True)
    threading.Thread(target=start_threads,
                     args=(dorks, num_threads, output_file, proxies, mode,
                           progress_bar, retries_counter)).start()
    # Poll until every dork is processed and the worker threads have exited.
    while progress_bar.n < progress_bar.total or threading.active_count() > 1:
        progress_bar.set_postfix_str(YELLOW + f'Retries: {retries_counter[0]}')
        time.sleep(0.1)
    progress_bar.close()
    print(f'Completed! Total retries: {retries_counter[0]}')

if __name__ == '__main__':
    main()
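# Usage sketch (file names taken from main() above; the script's own file name
# is whatever you saved it as):
#   1. Put one dork per line in dorks.txt next to the script.
#   2. Run it, pick mode 1 or 2, and choose a thread count.
#   3. Hits append to results.txt; finished dorks append to processed_dorks.txt,
#      so an interrupted run resumes where it left off.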