Coverage for src/crawler/by_source/bdim_crawler.py: 8%

231 statements  

coverage.py v7.12.0, created at 2025-12-11 14:57 +0000

import re

import lingua
import regex
from bs4 import BeautifulSoup, Tag
from lingua import LanguageDetectorBuilder
from ptf.cmds.xml.jats.builder.references import (
    # ContribAuthor,
    get_all_authors_xml,
    get_article_title_xml,
    get_ext_link_xml,
    get_publisher_xml,
    get_source_xml,
    get_volume_xml,
    get_year_xml,
)
from ptf.cmds.xml.jats.jats_parser import JatsBase
from ptf.cmds.xml.xml_utils import escape
from ptf.model_data import (
    ArticleData,
    create_abstract,
    create_articledata,
    create_contributor,
    create_issuedata,
)

from crawler.base_crawler import BaseCollectionCrawler
from crawler.utils import add_pdf_link_to_xarticle

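# Crawler for the Biblioteca Digitale Italiana di Matematica (http://www.bdim.eu).
# The "lingua" cookie selects the page language (en here, it for the second pass
# in parse_article_content); "matematica=tex" presumably asks the site to render
# math as TeX (an assumption inferred from the cookie name and the *_tex fields).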
class BdimCrawler(BaseCollectionCrawler):
    source_name = "Biblioteca Digitale Italiana di Matematica"
    source_domain = "BDIM"
    source_website = "http://www.bdim.eu"
    headers = {"accept_encoding": "utf-8", "cookie": "lingua=en; matematica=tex"}

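    # Hand-maintained title overrides, keyed by article pid; applied at the end
    # of parse_article_content.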
    title_corrections = {
        "RLINA_1965_8_39_5_a17": "Eventi fasici nel midollo spinale quali prove di inibizione presinaptica durante il sonno desincronizzato",
        "RLINA_1973_8_55_6_a0": "Complementarity between nilpotent selfmappings and periodic autohomeomorphisms.",
        "RLINA_1973_8_55_6_a2": "Sur une extension du lemme de Green.",
        "RLINA_1979_8_67_1-2_a6": "On the existence of an unbounded connected set of solutions for nonlinear equations in Banach spaces.",
        "RLINA_1972_8_52_2_a5": "Sul carattere proiettivo del rapporto plurisezionale.",
        "RLINA_1980_8_69_1-2_a6": "A note on a variational formulation of the Einstein equations for thermo-elastic materials.",
    }

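    # Issue links look like "?id=<col>_<issue>", e.g. "?id=RLINA_1965_8_39_5"
    # (example inferred from the title_corrections pids above).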
    issue_href = r"\?id=(?P<col>\w+)(?P<issue>_\d{1,4})"

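    # BDIM content is in English, French or Italian; restricting lingua to these
    # three candidates is the library's recommended way to improve detection
    # accuracy on short texts such as abstracts.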
    _language_detector_builder = LanguageDetectorBuilder.from_languages(
        lingua.Language.ENGLISH, lingua.Language.FRENCH, lingua.Language.ITALIAN
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def parse_collection_content(self, content):
        """
        Parse the HTML page of a BDIM collection and return a list of xissues.
        Each xissue has its pid/volume/number/year metadata + its url
        """
        soup = BeautifulSoup(content, "html.parser")
        xissues = []

        reg_issue = regex.compile(self.issue_href)

        issue_nodes = []
        for issue in soup.select("div.listafascicoli a"):
            href = issue.get("href")
            if isinstance(href, str) and reg_issue.search(href):
                issue_nodes.append(issue)

        for issue_node in issue_nodes:
            # issue_text = issue_node.get_text()

            # hrefs follow "?id=<col>_<year>_<serie>_<volume>_<number>"
            part_issue = issue_node.get("href").split("_")
            volume = part_issue[-2]
            number = part_issue[-1]
            year = part_issue[1]
            serie = part_issue[2]
            link = "/item" + issue_node.get("href")
            xissue = self.create_bdim_xissue(link, serie, volume, number, year)
            if xissue:
                xissues.append(xissue)

        return xissues

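    # Issue dates can span two years (presumably "1971/1972", judging by the
    # split below); only the first year is kept.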
    def get_year(self, year):
        if "/" in year:
            year = year.split("/")[0]

        return year

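    # The issue pid follows <collection>_<year>_<serie>_<volume>_<number>;
    # the title_corrections keys above are these pids plus an "_a<n>" article
    # suffix.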
    def create_bdim_xissue(self, url, serie, volume, number, dates):
        year = dates.replace("/", "-")

        xissue = create_issuedata()
        xissue.pid = f"{self.collection_id}_{year}_{serie}_{volume}_{number}"
        xissue.year = year
        xissue.volume = volume
        xissue.number = number
        xissue.vseries = serie
        xissue.url = self.source_website + url

        return xissue

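    # Each article on an issue page is a "div.referenza" block whose
    # "full entry" link points to the article's own page.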
    def parse_issue_content(self, content, xissue):
        soup = BeautifulSoup(content, "html.parser")
        article_nodes = soup.find_all("div", {"class": "referenza"})

        for index_article, article_node in enumerate(article_nodes):
            article_link_node = article_node.find("a", text="full entry")
            if article_link_node:
                url = article_link_node.get("href")
                xarticle = create_articledata()
                xarticle.pid = "a" + str(index_article)
                xarticle.url = self.source_website + url

                xissue.articles.append(xarticle)

    def parse_article_content(self, content, xissue, xarticle, url):
        """
        Parse the content with BeautifulSoup and return an ArticleData
        """
        soup = BeautifulSoup(content, "html.parser")
        # TITLE
        title_node = soup.select_one("span.titolo")
        if not title_node:
            raise ValueError("Couldn't find article title")
        xarticle.title_tex = title_node.get_text()

        # Lang
        if "(Italian)" in title_node.parent.text:
            xarticle.lang = "it"
        elif "(English)" in title_node.parent.text:
            xarticle.lang = "en"
        # Authors
        reg_author_link = regex.compile(r"\?testo=\w+")
        text_author_bloc = soup.select_one("div.referenza p")
        if text_author_bloc:
            for link in text_author_bloc.select("a"):
                href = link.get("href")
                if isinstance(href, str) and reg_author_link.search(href):
                    contrib_node = link.select_one("span.autore")
                    if contrib_node is not None:
                        surname_node = link.select_one("span.cognome")
                        firstname_node = link.select_one("span.nome")
                        author = create_contributor(role="author")

                        if surname_node is not None:
                            surname = surname_node.get_text()
                            author["last_name"] = surname

                        if firstname_node is not None:
                            firstname = firstname_node.get_text()
                            author["first_name"] = firstname

                        if not firstname_node or not surname_node:
                            string_name = contrib_node.get_text()
                            author["string_name"] = string_name

                        xarticle.contributors.append(author)

        # ABSTRACT
        abstract_section_node = soup.select_one("div.sunto")
        if abstract_section_node:
            abstract = str(abstract_section_node.get_text())

            xarticle.abstracts.append(
                create_abstract(value_tex=abstract, lang=self.detect_language(abstract))
            )

        # PDF
        pdf_url = soup.find_all("a", text="pdf")
        if len(pdf_url) > 0:
            pdf_url = self.source_website + pdf_url[0].get("href")
            add_pdf_link_to_xarticle(xarticle, pdf_url)

        # PAGES
        pages = soup.select_one("span.pagine")
        if pages:
            pages_to = re.compile(r"(\(?\d+\)?)?-?(\(?\d+\)?)").search(pages.get_text())
            if pages_to:
                parts = pages_to[0].split("-")
                first_page = parts[0].replace("(", "").replace(")", "")
                if len(parts) > 1:
                    last_page = parts[1].replace("(", "").replace(")", "")
                    xarticle.lpage = last_page
                xarticle.fpage = first_page

        # Biblio
        bibitems_tags = soup.select("div.biblio div.bibitem")
        xarticle.bibitems = [self.parse_ref(item) for item in bibitems_tags]

        # metadata
        bdim_mr_url = "http://www.ams.org/mathscinet-getitem?mr="
        bdim_zbl_url = "https://zbmath.org/?q=an:"
        metadata_bloc = soup.select_one("div.referenza > p")
        if not metadata_bloc:
            raise ValueError("metadata_bloc cannot be found")

        zbl_id = metadata_bloc.select_one(f"a[href^='{bdim_zbl_url}']")

        if zbl_id:
            extid_href = zbl_id.get("href")
            if isinstance(extid_href, str):
                xarticle.extids.append(("zbl-item-id", extid_href.removeprefix(bdim_zbl_url)))

        mr_id = metadata_bloc.select_one(f"a[href^='{bdim_mr_url}']")
        if mr_id:
            extid_href = mr_id.get("href")
            if isinstance(extid_href, str):
                xarticle.extids.append(
                    (
                        "mr-item-id",
                        extid_href.removeprefix(bdim_mr_url),
                    )
                )

        if xarticle.pid in self.title_corrections:
            xarticle.title_tex = self.title_corrections[xarticle.pid]

        # Fetch the Italian rendering of the same page for translated metadata
        content = self.download_file(url, headers={"cookie": "lingua=it; matematica=tex"})
        xarticle = self.parse_article_content_bdim_it(content, xissue, xarticle, url)
        return xarticle

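    # Second pass over the Italian version of the article page (requested with
    # the "lingua=it" cookie) to pick up the translated title and abstract.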
    def parse_article_content_bdim_it(self, content, xissue, xarticle: ArticleData, url):
        soup = BeautifulSoup(content, "html.parser")

        # Translated title
        trans_title_node = soup.select_one("span.titolo_trad")
        if trans_title_node:
            xarticle.trans_title_tex = trans_title_node.get_text()

        # Translated abstract
        abstract_section_node = soup.select_one("div.sunto")
        if abstract_section_node:
            abstract = str(abstract_section_node.get_text())
            # Guard: the first (English) pass may not have found any abstract
            if len(xarticle.abstracts) == 0 or xarticle.abstracts[0]["value_tex"] != abstract:
                xarticle.abstracts.append(
                    create_abstract(value_tex=abstract, lang=self.detect_language(abstract))
                )

        if xarticle.trans_title_tex or len(xarticle.abstracts) > 1:
            xarticle.trans_lang = "en" if xarticle.lang == "it" else "it"
        return xarticle

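    # Serializes one bibliography entry ("div.bibitem") into a JATS reference
    # string via JatsBase.bake_ref.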
    def parse_ref(self, item: Tag):
        value_xml = ""
        # First pass: we create a semi-complete JATS XML string, except for the
        # authors, which are stored inside authors_list to be serialized at the end
        authors_list = []
        for c in item.children:
            c_text = escape(c.text)
            if isinstance(c, str):
                value_xml += c_text
                continue

            if not isinstance(c, Tag):
                raise NotImplementedError("bibitem_tag is not a Tag or a string")

            if c.name == "a":
                a_xml, is_badge = self.parse_a_tag(c)
                if is_badge:
                    # Drop the " | " separator that precedes MR/Zbl/DOI badges
                    value_xml = regex.sub(r" \| $", "", value_xml)
                value_xml += a_xml
                continue

            child_class = c.get("class")
            if not child_class:
                value_xml += c_text
            elif "bautore" in child_class:
                # TODO: parse firstname and lastname
                author_data, author_xml = self.parse_biblio_author_tag(c, len(authors_list))
                authors_list.append(author_data)
                value_xml += author_xml
            elif "titolo" in child_class:
                value_xml += get_article_title_xml(c_text)
            elif "rivista" in child_class:
                value_xml += get_source_xml(c_text)
            elif "anno" in child_class:
                value_xml += get_year_xml(c_text)
            elif "volume" in child_class:
                value_xml += get_volume_xml(c_text)
            elif "publisher" in child_class:
                value_xml += get_publisher_xml(c_text)
            else:
                # booktitle
                value_xml += c_text

        # In order to have valid JATS XML, we have to group all authors into the
        # person-group tag.
        author_occurrences = regex.compile(r"{author_\d}").findall(value_xml)
        if len(author_occurrences) > 0:
            first_author = value_xml.index(author_occurrences[0])
            last_author = value_xml.index(author_occurrences[-1]) + len(author_occurrences[-1])
            value_xml = (
                value_xml[:first_author]
                + get_all_authors_xml(value_xml[first_author:last_author], authors_list)
                + value_xml[last_author:]
            )

        return JatsBase.bake_ref(value_xml)
        # return self.create_crawled_bibitem([*bib_elements, *bib_link_elements])

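    # Turns a reference link into a typed JATS ext-link: "MR ..." and "Zbl ..."
    # badges and "fulltext (doi)" links get their own id types; anything else
    # stays a plain "uri". The second return value flags badge links.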
    def parse_a_tag(self, a_tag: Tag):
        a_text = escape(a_tag.text)
        href = a_tag.get("href")
        if not href:
            return a_text, False
        elif isinstance(href, list):
            raise ValueError("a tag has multiple href values!")
        else:
            a_type = "uri"
            if a_text.startswith("MR "):
                a_type = "mr-item-id"
                a_text = a_text.removeprefix("MR ")
            elif a_text.startswith("Zbl "):
                a_type = "zbl-item-id"
                a_text = a_text.removeprefix("Zbl ")
            elif a_text == "fulltext (doi)":
                a_type = "doi"
                # The link text is not the DOI itself: derive it from the target
                a_text = href.removeprefix("http://dx.doi.org/")
            return get_ext_link_xml(escape(href), a_text, a_type), a_type != "uri"

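    # Replaces the name parts of one "span.bautore" with "{given_names}" and
    # "{surname}" placeholders and returns the author data plus an "{author_N}"
    # placeholder that parse_ref later expands via get_all_authors_xml.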
    def parse_biblio_author_tag(self, author_tag: Tag, index: int = 0):
        value_xml = ""
        author_data = {"template_str": ""}
        for c in author_tag.children:
            c_text = escape(c.text)
            if isinstance(c, str):
                author_data["template_str"] += c_text
                continue

            if not isinstance(c, Tag):
                raise NotImplementedError("author_tag is not a Tag or a string")
            # given name = "cognome" span, surname = "nome" span
            # (the classes are used inverted relative to their Italian meaning)
            child_class = c.get("class")
            if not child_class:
                value_xml += c_text
            elif "cognome" in child_class:
                c.replace_with("{given_names}")
                author_data["given_names"] = c_text
                author_data["template_str"] += "{given_names}"
            elif "nome" in child_class:
                c.replace_with("{surname}")
                author_data["surname"] = c_text
                author_data["template_str"] += "{surname}"
        value_xml += "{author_" + str(index) + "}"

        return author_data, value_xml