Scraper

import json
import requests
import bs4
from pathlib import Path

class WikiPage():
    def __init__(self, url, wiki_url):
        self.url = url
        self.wiki_url = wiki_url
        self.crawled = self.crawl()
        # Page title as shown on Wikipedia, without the " - Wikipedia" suffix.
        self.title = self.crawled.title.text.replace(" - Wikipedia", "")

    def get_beginning_links(self):
        # Collect links (anchors with a title attribute) from the first two
        # paragraphs of the article body.
        self.page_beginning = self.crawled.find_all("div", {"id": "mw-content-text"})[0]
        self.beginning_links = []
        for i in range(2):
            try:
                self.beginning_links += self.page_beginning.find_all("p")[i].find_all("a", {"title": True})
            except IndexError:
                continue
        # hrefs are relative ("/wiki/..."): drop the leading slash and prepend the wiki base URL.
        self.beginning_links = [self.wiki_url + a.get("href")[1:] for a in self.beginning_links]
        return self.beginning_links

    def crawl(self):
        # Fetch the page and parse it into a BeautifulSoup tree.
        response = requests.get(self.url)
        soup = bs4.BeautifulSoup(response.text, "html.parser")
        return soup

    def __str__(self):
        return self.title

    __repr__ = __str__

WikiPage

 WikiPage (url, wiki_url)

Crawl a single Wikipedia page and expose its title and the links found in its opening paragraphs.
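WikiPage wraps one article: it downloads the page, extracts the title, and collects the links that appear in the first two paragraphs of the body. The snippet below is a minimal sketch of direct usage; it needs network access, and the article URL is only an illustrative choice.

ecology_page = WikiPage("https://en.wikipedia.org/wiki/Ecology",
                        wiki_url="https://en.wikipedia.org/")
print(ecology_page)                    # prints the page title, e.g. "Ecology"
for link in ecology_page.get_beginning_links()[:5]:
    print(link)                        # absolute URLs built from the opening paragraphs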

class Crawler():
    def __init__(self, output_path, lang="en", first_n=10):
        self.lang = lang
        self.search_url = f"https://{lang}.wikipedia.org/w/index.php?search="
        self.wiki_url = f"https://{lang}.wikipedia.org/"
        self.pages = {}
        self.first_n = first_n
        self.output_path = Path(output_path)
        # exist_ok=True makes an explicit existence check unnecessary.
        self.output_path.mkdir(parents=True, exist_ok=True)
    
    def create_page_from_query(self, query):
        url = self.search_url + query.replace(" ", "+")
        return self.create_page(url)

    def create_page(self, url):
        return WikiPage(url, wiki_url=self.wiki_url)

    def create_card(self, url, final=False):
        # A "card" pairs a starting page with the pages linked from its opening paragraphs.
        starting_page = self.create_page(url)
        out = {"starting_page": starting_page, "children_pages": {}}
        for child_url in starting_page.get_beginning_links():
            try:
                if final:
                    # On the final level, fetch each child page in full.
                    out["children_pages"][child_url] = self.create_page(child_url)
                else:
                    # Otherwise just record the URL; the page is fetched later if needed.
                    out["children_pages"][child_url] = None
            except Exception:
                continue
            if len(out["children_pages"]) >= self.first_n:
                break
        return out
    
    def create_cards(self, query):
        # Search Wikipedia for the query, build a card for the result page,
        # then build a final card for each of its children.
        url = self.search_url + query.replace(" ", "+")
        cards = self.create_card(url, final=False)
        out = {"query": str(cards["starting_page"]), "cards": []}
        for child_url in cards["children_pages"]:
            try:
                new_card = self.create_card(child_url, final=True)
            except Exception:
                continue
            out["cards"].append(
                {str(new_card["starting_page"]): [str(p) for p in new_card["children_pages"].values()]}
            )
        return out

    def save_cards(self, cards):
        with open(self.output_path / f"{cards['query']}.json", "w") as f:
            json.dump(cards, f)

Crawler

 Crawler (output_path, lang='en', first_n=10)

Search Wikipedia for a query, build "cards" from the search result's first links and their own first links, and save them as JSON under output_path.
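The tests below exercise the full pipeline. For orientation, this is roughly the shape of the dictionary that create_cards returns and save_cards writes to disk; the titles here are hypothetical and only illustrate the structure.

# Illustrative shape only; real titles depend on what Wikipedia returns at crawl time.
example_cards = {
    "query": "Ecology",                                    # title of the search-result page
    "cards": [
        {"Ecosystem": ["Organism", "Abiotic component"]},  # child title -> its own first links
        {"Biodiversity": ["Species", "Genetics"]},
    ],
}
# save_cards(example_cards) would write this dict to "<output_path>/Ecology.json".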

Tests

import os
from fastcore.test import test_eq  # test helper assumed to come from fastcore, as in nbdev notebooks

query = "ecology"
crawler = Crawler("./test_data/temp_files", "en")
page = crawler.create_page_from_query("football")
cards = crawler.create_cards(query)
crawler.save_cards(cards)

test_eq(crawler.search_url + "football", "https://en.wikipedia.org/w/index.php?search=football")
test_eq(crawler.wiki_url + "wiki/football", "https://en.wikipedia.org/wiki/football")
test_eq(page.url, "https://en.wikipedia.org/w/index.php?search=football")
assert os.path.exists(f"./test_data/temp_files/{query.replace(' ', '_').capitalize()}.json")
# rmtree("./test_data/temp_files")