In [1]:
# Code attribution: Yiyin Shen, Tyler Caraza-Harter
# Imports
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
import matplotlib.pyplot as plt
from collections import deque
In [2]:
# Start Chrome and load the first page
service = Service(executable_path="chromedriver-win64/chromedriver.exe")
driver = webdriver.Chrome(service=service)
url = "https://pages.cs.wisc.edu/~yw/CS320F23TH1.html"
driver.get(url)
print("DONE")
DONE
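If no visible browser window is needed, the same driver can be created headless. A minimal sketch, reusing the imports from In [1] and assuming the same chromedriver path as above:

# Sketch: headless Chrome setup (assumes the chromedriver path used above)
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument("--headless=new")  # run without opening a browser window
driver = webdriver.Chrome(
    service=Service(executable_path="chromedriver-win64/chromedriver.exe"),
    options=options,
)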
In [3]:
# Get the links on the page
def get_links():
    links = driver.find_elements("tag name", "a")
    pages = list(filter(lambda x: x.text == "Link", links))
    hrefs = list(map(lambda x: x.get_attribute("href"), pages))
    return hrefs
get_links()
Out[3]:
['https://pages.cs.wisc.edu/~yw/CS320F23TH2.html', 'https://pages.cs.wisc.edu/~yw/CS320F23TH3.html', 'https://pages.cs.wisc.edu/~yw/CS320F23TH4.html']
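The string locator "tag name" can also be written with Selenium's named By constants. A behavior-equivalent sketch of the same lookup (the helper name get_links_by is ours, not part of the original notebook):

# Sketch: the same link lookup written with the By constants
from selenium.webdriver.common.by import By

def get_links_by():
    links = driver.find_elements(By.TAG_NAME, "a")
    pages = [a for a in links if a.text == "Link"]
    return [a.get_attribute("href") for a in pages]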
In [4]:
# Check if there is an additional image on the page
def goal_check():
    images = driver.find_elements("tag name", "img")
    if len(images) == 2:  # the goal page has one extra image
        images[0].screenshot("image.png")
        print(images[0].get_attribute("src"))
        plt.imshow(plt.imread("image.png"))
        return True
    return False
goal_check()
Out[4]:
False
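goal_check counts the <img> elements the moment the page loads. These course pages are static, so that is enough; if a page inserted images dynamically, an explicit wait could be added before counting. A hedged sketch of that variant, not needed here:

# Sketch: wait up to 5 seconds for <img> elements to appear before counting
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By

WebDriverWait(driver, 5).until(
    lambda d: len(d.find_elements(By.TAG_NAME, "img")) >= 1
)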
In [5]:
# BFS to find the goal page
def crawl(url):
    visited = set()
    queue = deque([url])
    count = 0  # cap iterations to avoid crawling forever
    while count < 100:
        count += 1
        current = queue.popleft()
        visited.add(current)
        driver.get(current)
        if goal_check():
            return current
        for link in get_links():
            if link not in visited and link not in queue:
                queue.append(link)
        if len(queue) == 0:
            break
url = "https://pages.cs.wisc.edu/~yw/CS320F23TH1.html"
crawl(url)
https://pages.cs.wisc.edu/~yw/CS320/amongus.png
Out[5]:
'https://pages.cs.wisc.edu/~yw/CS320F23TH23.html'
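The deque is what makes this crawl breadth-first: pages come off the queue in the order they were discovered, so every page one link away is visited before any page two links away. A self-contained sketch of that visit order on a hypothetical toy graph (node names are made up, no Selenium involved):

# Sketch: BFS visit order on a hypothetical toy graph
from collections import deque

toy_graph = {"TH1": ["TH2", "TH3"], "TH2": ["TH4"], "TH3": [], "TH4": []}

def bfs_order(start):
    visited, queue, order = set(), deque([start]), []
    while queue:
        node = queue.popleft()  # FIFO: oldest discovered node comes out first
        visited.add(node)
        order.append(node)
        for neighbor in toy_graph[node]:
            if neighbor not in visited and neighbor not in queue:
                queue.append(neighbor)
    return order

print(bfs_order("TH1"))  # ['TH1', 'TH2', 'TH3', 'TH4'], i.e. level by level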
In [6]:
# Remember to quit the driver when the crawl is done
driver.quit()