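# Scrapes a post page for modrefer.in links, follows each one through the
# technews.unblockedgames.world bypass page in a real Chrome profile, and
# collects the final driveseed.org file links.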
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
import requests
import time
import re
# Configurations
chrome_profile_path = r"user-data-dir=C:\\Users\\himan\\AppData\\Local\\Google\\Chrome\\User Data"
profile_directory = "--profile-directory=Profile 3"
chrome_driver_path = r"C:\\Users\\himan\\.cache\\selenium\\chromedriver\\win64\\135.0.7049.84\\chromedriver.exe"
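# NOTE: the Chrome profile and chromedriver paths above are machine-specific; adjust them for your setup.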
# URL to process
post_url = input("Enter Post URL : ")
# Set up Selenium driver
options = webdriver.ChromeOptions()
options.add_argument(chrome_profile_path)
options.add_argument(profile_directory)
driver = webdriver.Chrome(service=Service(chrome_driver_path), options=options)
def get_modrefer_links(post_url):
    print("[*] Fetching post page...")
    response = requests.get(post_url)
    soup = BeautifulSoup(response.text, "html.parser")
    links = [a['href'] for a in soup.find_all("a", href=True) if "modrefer.in" in a['href']]
    print(f"[*] Found {len(links)} modrefer link(s).")
    return links

def follow_redirect(url):
    print(f"[*] Following modrefer link: {url}")
    driver.get(url)
    time.sleep(5)  # Wait for redirect to complete
    redirected_url = driver.current_url
    print(f"[*] Redirected to: {redirected_url}")
    return redirected_url

def extract_technews_url():
    print("[*] Looking for technews link...")
    soup = BeautifulSoup(driver.page_source, "html.parser")
    tech_links = [a['href'] for a in soup.find_all("a", href=True) if "technews.unblockedgames.world" in a['href']]
    if tech_links:
        print(f"[*] Found technews link: {tech_links[0]}")
        return tech_links[0]
    else:
        print("[!] No technews link found.")
        return None

def extract_driveseed_url():
    print("[*] Waiting for the DriveSeed URL to appear...")
    try:
        # The wait only returns once the browser has landed on a driveseed.org/file/ URL
        WebDriverWait(driver, 15).until(
            lambda d: "driveseed.org/file/" in d.current_url
        )
        print(f"[+] Final DriveSeed link: {driver.current_url}")
        return driver.current_url
    except TimeoutException:
        print("[!] Timed out waiting for DriveSeed URL.")
        return None

# Main flow
driveseed_links = []
modrefer_links = get_modrefer_links(post_url)

for mod_link in modrefer_links:
    retry = 0
    while retry < 3:
        redirected = follow_redirect(mod_link)
        tech_url = extract_technews_url()
        if tech_url:
            print(f"[*] Opening Technews URL: {tech_url}")
            driver.get(tech_url)
            time.sleep(10)  # Wait for bypass to happen automatically
            final_link = extract_driveseed_url()
            if final_link:
                driveseed_links.append(final_link)
                break  # success, break retry loop
        print("[!] Retry due to failure or timeout...")
        retry += 1

print("\n[*] All DriveSeed Links Found:")
for link in driveseed_links:
    print(link)

driver.quit()