Coverage for src/crawler/by_source/arsia_crawler.py: 20%
45 statements
from bs4 import BeautifulSoup
from ptf.external.arxiv import get_arxiv_article
from ptf.external.datacite import get_datacite_articles_in_journal
from ptf.model_data import create_issuedata

from crawler.base_crawler import BaseCollectionCrawler, add_pdf_link_to_xarticle

class ArsiaCrawler(BaseCollectionCrawler):
    source_name = "Ars Inveniendi Analytica website"
    source_domain = "ARSIA"
    source_website = "https://ars-inveniendi-analytica.com/"

    def parse_collection_content(self, content):
        """
        Ars Inveniendi Analytica.
        We ignore the journal web page and query DataCite to get the list of articles.
        The articles are then grouped into xissues based on their publication year.
        Each xissue has its year + list of articles with their URLs.
        """
        what = ["published", "year", "primary_url"]
        xarticles = get_datacite_articles_in_journal("Ars Inveniendi Analytica", what)
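        # Drop records whose URL is just the journal home page (not real articles)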
        xarticles = [article for article in xarticles if article.url != self.source_website]

        xissues = []
        years = {}
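        # Group the articles into one xissue per publication year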
        for xarticle in xarticles:
            year = str(xarticle.year)
            if year not in years:
                xissue = create_issuedata()
                xissue.pid = self.collection_id + "_" + year + "__"
                xissue.year = year

                years[year] = xissue
                xissues.append(xissue)
            else:
                xissue = years[year]

            xissue.articles.append(xarticle)

        return xissues

    def parse_article_content(self, content, xissue, xarticle, url):
        """
        Parse the content with BeautifulSoup and return an ArticleData
        """
        # We only parse the arXiv id in the Ars Inveniendi Analytica article page
        soup = BeautifulSoup(content, "html.parser")
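        # ':-soup-contains-own' is a soupsieve pseudo-class that matches an
        # element by its own text content (here, the "Read article" link)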
        a_node = soup.select_one("div.main_entry a:-soup-contains-own('Read article')")
        if a_node is None:
            raise ValueError("Couldn't find the 'Read article' link")
        href = a_node.get("href")
        if not isinstance(href, str):
            raise ValueError("href is not a string")
        arxiv_id = href.split("/")[-1]

        new_xarticle = get_arxiv_article(arxiv_id)
        if new_xarticle is None:
            raise ValueError(f"Couldn't fetch arXiv article {arxiv_id}")
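        # Keep the metadata from the DataCite record that arXiv doesn't provide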
        new_xarticle.doi = xarticle.doi
        new_xarticle.ext_links = xarticle.ext_links
        new_xarticle.url = url
        new_xarticle.lang = "en"
        new_xarticle.date_published_iso_8601_date_str = xarticle.date_published_iso_8601_date_str
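        # pdf_url is presumably filled in by get_arxiv_article; register it as
        # the article's PDF link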
        add_pdf_link_to_xarticle(new_xarticle, new_xarticle.pdf_url)

        return new_xarticle
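
# A minimal, self-contained sketch of the year-grouping pattern used in
# parse_collection_content above, with plain dicts standing in for the
# ptf.model_data article/issue objects (hypothetical stand-ins, not the
# real types):
if __name__ == "__main__":
    fake_articles = [{"year": 2020}, {"year": 2021}, {"year": 2020}]
    issues_by_year: dict[str, dict] = {}
    for article in fake_articles:
        year = str(article["year"])
        # The first article of a given year creates the issue; later ones reuse it
        issue = issues_by_year.setdefault(year, {"year": year, "articles": []})
        issue["articles"].append(article)
    assert len(issues_by_year["2020"]["articles"]) == 2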