import time

import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# --- Configure a headless Chrome session -------------------------------------
options = Options()
# Selenium 4 removed the `options.headless` attribute; the "new" headless mode
# renders pages the same way as headed Chrome.
options.add_argument("--headless=new")
service = Service('/path/to/chromedriver')  # TODO: update path (Selenium >=4.6 can auto-manage the driver)
driver = webdriver.Chrome(service=service, options=options)

data = []  # rows of [name, charges, warrant_date]
try:
    driver.get("http://www.wspdp2c.org/wantedlist.aspx")

    # Wait explicitly for the warrants table instead of a fixed 5-second sleep:
    # faster when the page loads quickly, more robust when it loads slowly.
    WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.ID, "gvWarrants"))
    )

    # Scrape the table body, skipping the header row.
    rows = driver.find_elements(By.XPATH, '//table[@id="gvWarrants"]/tbody/tr')
    for row in rows[1:]:
        cols = row.find_elements(By.TAG_NAME, "td")
        if len(cols) >= 3:  # guard against pager/separator rows
            name = cols[0].text.strip()
            charges = cols[1].text.strip()
            warrant_date = cols[2].text.strip()
            data.append([name, charges, warrant_date])
finally:
    # Always release the browser process, even if the scrape raises.
    driver.quit()

# Save to CSV
df = pd.DataFrame(data, columns=["Name", "Charges", "Warrant Issued Date"])
df.to_csv("wspd_most_wanted.csv", index=False)
print("CSV saved as wspd_most_wanted.csv")