Coverage for src/crawler/by_source/mathbas_crawler.py: 85%
89 statements
coverage.py v7.12.0, created at 2025-12-11 14:57 +0000
from bs4 import BeautifulSoup, Tag
from ptf.model_data import IssueData, create_articledata, create_contributor, create_issuedata
from requests import Response

from crawler.base_crawler import BaseCollectionCrawler
from crawler.crawler_utils import get_issue_pid
from crawler.utils import add_pdf_link_to_xarticle, cleanup_str, regex_to_dict


class MathbasCrawler(BaseCollectionCrawler):
    source_name = "Mathematica Balkanica website"
    source_domain = "MATHBAS"
    source_website = "http://www.math.bas.bg/infres/MathBalk/"
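
    # Matches e.g. "Vol. 23 (2009), Fasc. 1-2" -> volume="23", year="2009", number="1-2"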
    volume_regex = r"Vol\. (?P<volume>\d+) \((?P<year>\d+)\), Fasc\. (?P<number>[\d\-]+)"

    def parse_collection_content(self, content):
        # We are forced to fetch all volume pages first, because some volumes declare multiple issues.
        soup = BeautifulSoup(content, "html.parser")
        xissues = []

        issues_tags = soup.select("#table4 td a")
        for tag in issues_tags:
            href = tag.get("href")
            if not isinstance(href, str):  # coverage: condition never true
                raise ValueError(
                    f"[{self.source_domain}] {self.collection_id} : Invalid volume href"
                )
            text = cleanup_str(tag.text)
            if not text.startswith("Volume"):
                continue
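
            # The placeholder issue only carries the volume URL; the real issue
            # objects are built inside parse_mathbas_volume.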
            fake_issue = create_issuedata()
            fake_issue.url = self.source_website + href
            volume_content = self.download_file(self.source_website + href)
            xissues.extend(
                self.parse_mathbas_volume(
                    volume_content, fake_issue, skip_articles=True, only_pid=False
                )
            )

        return xissues

    def parse_issue_content(self, content, xissue):
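        # With only_pid=True, parse_mathbas_volume fills the passed-in xissue
        # (matched by pid) in place and returns it.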
        target_issue = self.parse_mathbas_volume(
            content, xissue, skip_articles=False, only_pid=True
        )
        if not isinstance(target_issue, IssueData):  # coverage: condition never true
            raise ValueError("Couldn't filter issue by PID")

    def parse_mathbas_volume(self, content, xissue, skip_articles=False, only_pid=False):
        """Parse a volume page into one or more issues.

        skip_articles parses only the issue titles/numbers, not the issue contents/articles.
        only_pid fills and returns the input issue matched by pid instead of creating new ones.
        """
        soup = BeautifulSoup(content, "html.parser")
        table = soup.select_one("#table3 td[bgcolor='#F9FCC5']")
        xissues: list[IssueData] = []
        if not table:  # coverage: condition never true
            raise ValueError(
                f"[{self.source_domain}] {self.collection_id} {xissue.url} : Volume cannot be parsed"
            )
        current_issue = None
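        # Walk the table's direct children: "heading" rows open a new issue,
        # while "table5" blocks hold the current issue's articles.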
        for child in table.findChildren(recursive=False):
            if "heading" in (child.get("class", [])):
                text = cleanup_str(child.text)
                if not text.startswith("Vol"):  # coverage: condition never true
                    continue

                volume_dict = regex_to_dict(
                    self.volume_regex,
                    text,
                    error_msg=f"[{self.source_domain}] {self.collection_id} : Couldn't parse volume",
                )
                if only_pid and current_issue and current_issue.pid == xissue.pid:  # coverage: condition never true
                    return current_issue

                if (
                    get_issue_pid(
                        self.collection_id,
                        volume_dict["year"],
                        volume_dict["volume"],
                        volume_dict["number"],
                    )
                    == xissue.pid
                ):
                    current_issue = xissue
                else:
                    current_issue = self.create_xissue(
                        xissue.url,
                        volume_dict["year"],
                        volume_dict["volume"],
                        volume_dict["number"],
                    )
                xissues.append(current_issue)

            elif child.get("id") == "table5" and not skip_articles:
                if not current_issue:  # coverage: condition never true
                    raise ValueError(
                        f"[{self.source_domain}] {self.collection_id} {xissue.url} : Couldn't parse volume page : article declared before issue"
                    )
                self.parse_mathbas_issue(child, current_issue)
            else:
                continue
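
        # If the early return above never fired, the issue matched by pid (if
        # any) is presumably the last heading parsed, hence xissues[-1].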
        if only_pid:
            return xissues[-1]
        return xissues

    def parse_mathbas_issue(self, tag: Tag, xissue: IssueData):
        lines = tag.select("tr")
        # Each table row holds one article: .names (authors), .title (a link to the PDF) and .pages.
        for index, line in enumerate(lines):
            names = line.select_one(".names")
            if not names:  # coverage: condition never true
                raise ValueError(f"[{self.source_domain}] {xissue.pid} : Couldn't parse authors")
            title = line.select_one(".title")
            if not title:  # coverage: condition never true
                raise ValueError(
                    f"[{self.source_domain}] {xissue.pid} : Couldn't parse article title"
                )
            pages = line.select_one(".pages")
            if not pages:  # coverage: condition never true
                raise ValueError(
                    f"[{self.source_domain}] {xissue.pid} : Couldn't parse article pages"
                )
            pdf_url = title.get("href")
            if not isinstance(pdf_url, str):  # coverage: condition never true
                raise ValueError(f"[{self.source_domain}] {xissue.pid} : Couldn't extract pdf url")

            xarticle = create_articledata()
            authors = cleanup_str(names.text)
            if authors.endswith("."):
                authors = authors[:-1]

            if authors != "":
                for a in authors.split(", "):
                    xarticle.contributors.append(create_contributor(string_name=a, role="author"))

            xarticle.title_tex = cleanup_str(title.text)
            xarticle.fpage = cleanup_str(pages.text)
            xarticle.url = xissue.url
            add_pdf_link_to_xarticle(pdf_url=self.source_website + pdf_url, xarticle=xarticle)
            xarticle.pid = f"a{index}"
            xissue.articles.append(xarticle)

    def decode_response(self, response: Response, encoding: str = "utf-8"):
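        # The source pages appear to be windows-1252 encoded, so the requested
        # encoding is ignored and windows-1252 is forced instead.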
        return super().decode_response(response, "windows-1252")