Coverage for src/crawler/by_source/dmlcz_crawler.py: 77%
105 statements
coverage.py v7.9.0, created at 2025-10-08 15:14 +0000
import re

from bs4 import BeautifulSoup, Tag
from ptf.model_data import create_abstract, create_articledata, create_subj

from crawler.base_crawler import BaseCollectionCrawler


class DmlczCrawler(BaseCollectionCrawler):
    source_name = "Czech Digital Mathematics Library"
    source_domain = "DMLCZ"
    source_website = "https://dml.cz"

    issue_href = r"/handle/\d+\.dmlcz/\d+"
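    # issue_href matches DSpace-style handle links such as
    # /handle/10338.dmlcz/149887 (the example issue cited in
    # parse_issue_content below).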

    def parse_collection_content(self, content):
        """
        Parse the HTML page of a DML-CZ collection and return a list of xissues.
        Each xissue has its pid/volume/number/year metadata + its url.
        """
        soup = BeautifulSoup(content, "html.parser")
        xissues = []

        # Each <td class="volume"> cell holds the volume label and year; the
        # following <td> holds the links to the individual issues.
        issue_nodes = soup.find_all("td", {"class": "volume"})

        for issue_node in issue_nodes:
            reg_year = re.compile(r"\d{4}")
            reg_volume = re.compile(r"Volume \d+")
            issue_text = issue_node.get_text()
            if re.compile(r"\d+").search(issue_text):
                elem = issue_node.find("a")
                dates = reg_year.search(issue_text)
                volume = reg_volume.search(elem.get_text())
                issues = issue_node.find_next("td").find_all("a")
                if volume:
                    volume = volume[0].replace("Volume ", "")
                if dates:
                    dates = dates[0]
                for issue in issues:
                    link = issue.get("href")
                    number = issue.get_text()
                    xissue = self.create_dmlcz_xissue(link, volume, number, dates)
                    if xissue:
                        xissues.append(xissue)

        return xissues

    def get_year(self, year):
        # An academic-year span such as "2001/2002" is reduced to its first year.
        if "/" in year:
            year = year.split("/")[0]

        return year
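
    # create_dmlcz_xissue (below) normalizes the scraped strings before
    # delegating to the base class: a year span such as "2011/2012" becomes
    # "2011-2012" and a double issue such as "1,2" becomes "1-2" (example
    # values are illustrative).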
    def create_dmlcz_xissue(self, url, volume_str: str, number, dates):
        year = dates.replace("/", "-")
        number = number.replace(",", "-")

        volume = volume_str
        if not volume_str.isnumeric():
            volume = None
            self.logger.debug("Couldn't parse volume string", extra={"url": url})
        else:
            volume = str(int(volume_str))

        xissue = super().create_xissue(self.source_website + url, year, volume, number)

        return xissue

    def parse_issue_content(self, content, xissue):
        soup = BeautifulSoup(content, "html.parser")
        article_nodes = soup.find_all("td", {"class": "article"})

        # DML-CZ may list the same article multiple times
        # (e.g. https://dml.cz/handle/10338.dmlcz/149887), so articles
        # already collected are skipped.
        article_urls = []

        for index_article, article_node in enumerate(article_nodes):
            article_link_node = article_node.find("a")
            if article_link_node:
                url = article_link_node.get("href")
                if url not in article_urls:
                    article_urls.append(url)

                    xarticle = create_articledata()
                    xarticle.pid = "a" + str(index_article)
                    xarticle.url = self.source_website + url

                    xissue.articles.append(xarticle)
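
    # parse_article_content (below) first harvests the citation_* <meta> tags,
    # then scrapes the page body for what DML-CZ exposes only in HTML: title,
    # abstract, MSC subjects, the page range and the DOI.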
    def parse_article_content(self, content, xissue, xarticle, url):
        """
        Parse the content with BeautifulSoup and return an ArticleData.
        """
        soup = BeautifulSoup(content, "html.parser")
        self.get_metadata_using_citation_meta(
            xarticle,
            xissue,
            soup,
            [
                "lang",
                "title",
                "author",
                "pdf",
                "abstract",
                "page",
                "mr",
                "zbl",
                "publisher",
                "keywords",
            ],
        )

        bloc_ref_ids = soup.find("div", {"class": "item-refids"})

        # TITLE
        title_node = soup.find("span", {"class": "item-title"})
        if title_node:
            xarticle.title_tex = title_node.get_text()

        # ABSTRACT
        abstract_section_node = soup.find("dim:field")
        if abstract_section_node:
            abstract = str(abstract_section_node.get_text())
            xarticle.abstracts.append(
                create_abstract(
                    value_tex=abstract,
                    lang=xarticle.lang,
                )
            )

        # PDF
        # link_nodes = soup.find_all("a")
        # for link_node in link_nodes:
        #     pdf_url = link_node.get("href")
        #     if pdf_url.startswith("/bitstream/"):
        #         add_pdf_link_to_xarticle(xarticle, pdf_url)

        # MSC
        reg_msc = re.compile("/browse-subject")
        subjs_nodes = [
            a.get_text() for a in soup.find_all("a") if reg_msc.search(a.get("href", ""))
        ]
        for subj in subjs_nodes:
            subject = create_subj(value=subj, type="msc", lang=xarticle.lang)
            xarticle.kwds.append(subject)

        # PAGES
        pages = soup.find("span", {"class": "item-pp"})
        if pages:
            pages_to = re.compile(r"(\(?\d+\)?)?-?(\(?\d+\)?)").search(pages.get_text())
            if pages_to:
                parts = pages_to[0].split("-")
                first_page = parts[0].replace("(", "").replace(")", "")
                if len(parts) > 1:
                    last_page = parts[1].replace("(", "").replace(")", "")
                    xarticle.lpage = last_page
                xarticle.fpage = first_page

        # Biblio
        # bibitems_tags = soup.select("div.references-inside div.reference")
        # bibitems = [self.parse_bibitem_tag(item) for item in bibitems_tags]
        # if len(bibitems) > 0:
        #     xarticle.abstracts.append(self.create_bibliography(bibitems))

        # DOI
        reg_doi = re.compile(r"dx\.doi\.org")
        if bloc_ref_ids and isinstance(bloc_ref_ids, Tag):
            doi_node = [
                a for a in bloc_ref_ids.find_all("a") if reg_doi.search(a.get("href", ""))
            ]
            if len(doi_node) > 0:
                doi = doi_node[0].get_text()
                # Strip any resolver prefix before the "10." part of the DOI
                pos = doi.find("10.")
                if pos > 0:
                    doi = doi[pos:]
                xarticle.doi = doi

                # fix wrong doi attribution for article a14 of volume 62 number 1
                # 10.1007/s10587-012-0005-x:
                if xarticle.pid in ["CMJ_2012_62_1_a14", "ZCSUT_2012_22_3_a3"]:
                    xarticle.doi = None
                else:
                    xarticle.pid = (
                        doi.replace("/", "_").replace(".", "_").replace("-", "_").replace(":", "_")
                    )

        # Hack to handle articles with no titles
        if not xarticle.title_tex:
            xarticle.title_tex = " "

        return xarticle
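

# Usage sketch: a minimal, hypothetical way to drive the parser by hand. It
# assumes DmlczCrawler can be constructed without arguments and that the
# collection HTML has already been fetched; neither assumption is confirmed
# by this file, and the snapshot path is purely illustrative.
if __name__ == "__main__":
    from pathlib import Path

    crawler = DmlczCrawler()  # assumption: no required constructor arguments
    html = Path("dmlcz_collection.html").read_text(encoding="utf-8")
    for xissue in crawler.parse_collection_content(html):
        print(xissue.pid, xissue.url)  # pid and url per the docstring above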