Coverage for src/crawler/by_source/arsia_crawler.py: 100%
28 statements
coverage.py v7.6.4, created at 2025-01-15 14:09 +0000
from ptf.external.datacite import get_datacite_articles_in_journal
from ptf.model_data import create_issuedata

from crawler.by_source.da_crawler import DaCrawler


class ArsiaCrawler(DaCrawler):
    source_name = "Ars Inveniendi Analytica website"
    source_domain = "ARSIA"
    source_website = "https://ars-inveniendi-analytica.com/"
    periode_begin = 2021
    periode_end = 2024

    # def __init__(self, *args, **kwargs):
    #     # We want to skip the init of DaCrawler and go straight to BaseCollectionCrawler
    #     super(DaCrawler, self).__init__(*args, **kwargs)
    def parse_collection_content(self, content):
        """
        Ars Inveniendi Analytica.
        We ignore the journal web page and query DataCite to get the list of articles.
        The publication year returned by DataCite is used to group the articles into xissues.
        Each xissue has its year + the list of articles with their URLs.
        """
        what = ["published", "year", "primary_url"]
        xarticles = get_datacite_articles_in_journal("Ars Inveniendi Analytica", what)

        xarticles = [article for article in xarticles if article.url != self.source_website]

        xissues = []
        years = {}
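        # Group the articles into one synthetic xissue per publication year.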
        for xarticle in xarticles:
            year = str(xarticle.year)
            if year not in years:
                xissue = create_issuedata()
                xissue.pid = self.collection_id + "_" + year + "__"
                xissue.year = year

                years[year] = xissue
                xissues.append(xissue)
            else:
                xissue = years[year]

            xissue.articles.append(xarticle)

        return xissues
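
    # Article parsing is inherited unchanged from DaCrawler.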
    def parse_article_content(self, content, xissue, xarticle, url, pid):
        return super().parse_article_content(content, xissue, xarticle, url, pid)
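
Below, a minimal standalone sketch of the year-grouping behaviour in parse_collection_content; ArticleStub and group_by_year are hypothetical illustrations, not part of the crawler API:

# Hypothetical demonstration of the grouping above (not part of arsia_crawler.py).
from dataclasses import dataclass


@dataclass
class ArticleStub:
    # Stand-in for the article records returned by get_datacite_articles_in_journal
    year: int
    url: str


def group_by_year(articles, collection_id="ARSIA"):
    issues, years = [], {}
    for article in articles:
        year = str(article.year)
        if year not in years:
            # One synthetic issue per year, with a pid such as "ARSIA_2021__"
            issue = {"pid": collection_id + "_" + year + "__", "year": year, "articles": []}
            years[year] = issue
            issues.append(issue)
        years[year]["articles"].append(article)
    return issues


articles = [ArticleStub(2021, "https://ars-inveniendi-analytica.com/a"),
            ArticleStub(2021, "https://ars-inveniendi-analytica.com/b"),
            ArticleStub(2022, "https://ars-inveniendi-analytica.com/c")]
print([i["pid"] for i in group_by_year(articles)])  # ['ARSIA_2021__', 'ARSIA_2022__']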