Coverage for src/crawler/by_source/nsjom/nsjom_xml_crawler.py: 85%
124 statements
« prev ^ index » next coverage.py v7.9.0, created at 2025-08-29 13:43 +0000
1import re
2import typing
4from bs4 import BeautifulSoup, Tag
6# from ptf.model_data import create_publisherdata
7from ptf.model_data import (
8 IssueData,
9 create_abstract,
10 create_articledata,
11 create_contributor,
12 create_extlink,
13 create_issuedata,
14 create_publisherdata,
15 create_subj,
16)
18from crawler.utils import add_pdf_link_to_xarticle
20if typing.TYPE_CHECKING: 20 ↛ 21line 20 didn't jump to line 21 because the condition on line 20 was never true
21 from .nsjom_crawler import NsjomCrawler
# Identifier of this crawl source; used to build pids and error messages.
source_domain = "NSJOM"
def parse_collection_content(
    self: "NsjomCrawler",
    _: str,
    source_domain: str = "NSJOM",
    xissue_pid_to_parse: str | None = None,
):
    """
    Parses all articles from one xml file : https://sites.dmi.uns.ac.rs/nsjom/NSJOM.xml
    From 2015 to today

    Downloads the collection-wide XML, groups every published <record> into
    its issue and returns the list of IssueData (articles attached). When
    `xissue_pid_to_parse` is given, only the issue with that pid is built.

    Raises ValueError when the XML has no <records> container, or a record
    lacks a publicationType or a year.
    """
    # Keyed by (year, volume, issue): the issue pid embeds the year, so two
    # issues sharing a volume/issue number across years must stay distinct.
    xissues: dict[tuple[int, str, str], IssueData] = {}
    url = "https://sites.dmi.uns.ac.rs/nsjom/NSJOM.xml"
    content = self.download_file(url)
    soup = BeautifulSoup(content, "lxml-xml")
    record_container_element = soup.select_one("records")
    if record_container_element is None:
        raise ValueError(f"[{source_domain}] Cannot parse source")
    for record_element in record_container_element.select("record"):
        publication_type_tag = record_element.select_one("publicationType")
        if publication_type_tag is None:
            raise ValueError(f"[{source_domain}] Cannot determine article publicationType")
        if publication_type_tag.text != "published":
            # Unpublished records are silently skipped.
            continue
        year_tag = record_element.select_one("year")
        if year_tag is None or year_tag.text == "":
            raise ValueError(f"[{source_domain}] Cannot parse year from article")
        year = int(year_tag.text)
        xarticle, volume_number, issue_number = parse_article(
            self, record_element, source_domain=source_domain
        )
        issue_key = (year, volume_number, issue_number)
        if issue_key not in xissues:
            pid = f"{source_domain}_{year}__{volume_number}_{issue_number}"
            if xissue_pid_to_parse and xissue_pid_to_parse != pid:
                # Caller asked for a single issue; drop records from others.
                continue
            xissue = create_issuedata()
            parse_issue_tag(xissue, record_element, year)
            xissue.year = year_tag.text
            xissue.volume = volume_number
            xissue.number = issue_number
            xissue.pid = pid
            xissues[issue_key] = xissue
        xissues[issue_key].articles.append(xarticle)

    return list(xissues.values())
def parse_issue_content(self: "NsjomCrawler", content: str, xissue: IssueData):
    """Parse one issue by re-reading the collection XML, filtered on this issue's pid."""
    if xissue.year:
        return parse_collection_content(self, content, source_domain, xissue.pid)
    raise ValueError("Issue year is not set")
def parse_issue_tag(xissue: IssueData, article_tag: Tag, year: int) -> IssueData:
    """Fill issue-level metadata (publisher, source link) from one article record.

    Mutates and returns `xissue`.
    """
    # bs4 truthiness (not an `is None` check) kept on purpose: an empty
    # <publisher/> tag is falsy and contributes no publisher.
    publisher_tag = article_tag.select_one("publisher")
    if publisher_tag:
        publisher = create_publisherdata()
        publisher.name = publisher_tag.text
        xissue.publisher = publisher

    xissue.ext_links.append(
        create_extlink(
            rel="source",
            location=f"https://sites.dmi.uns.ac.rs/nsjom/issue.html?year={year}",
            metadata=source_domain,
        )
    )
    return xissue
def parse_article(self: "NsjomCrawler", article_tag: Tag, source_domain: str = "NSJOM"):
    """
    Parse one <record> element of NSJOM.xml into an article.

    Returns (xarticle, volume_number, issue_number); the volume/issue strings
    let the caller group articles into issues.

    Raises ValueError if the record lacks a doi, or a volume/issue pair.
    """
    xarticle = create_articledata()

    doi_tag = article_tag.select_one("doi")
    if doi_tag is None:
        raise ValueError(f"[{source_domain}] : Article doi not found")
    xarticle.doi = doi_tag.text
    # Derive the pid by replacing '/', '.' and '-' in the DOI with '_'.
    # Bug fix: the previous pattern "\\/\\.-" only matched the literal
    # substring "/.-", leaving slashes and dots in the pid.
    xarticle.pid = re.sub(r"[/.\-]", "_", doi_tag.text)

    page_start_tag = article_tag.select_one("startPage")
    page_end_tag = article_tag.select_one("endPage")
    if page_start_tag:
        xarticle.fpage = page_start_tag.text
    if page_end_tag:
        xarticle.lpage = page_end_tag.text

    date_published_tag = article_tag.select_one("publicationDate")
    if date_published_tag:
        xarticle.date_published_iso_8601_date_str = date_published_tag.text

    url_tag = article_tag.select_one("publisherRecordId")
    if url_tag:
        ext_link = create_extlink(
            rel="source",
            location=f"https://sites.dmi.uns.ac.rs/nsjom/paper.html?noid={url_tag.text}",
            metadata=source_domain,
        )
        xarticle.ext_links.append(ext_link)

    title_tag = article_tag.select_one("title")
    if title_tag:
        xarticle.title_tex = title_tag.text

    # TODO : Affiliations ?
    authors_container = article_tag.select_one("authors")
    if authors_container:
        for author_tag in authors_container.select("author"):
            author = create_contributor(role="author")
            author_name_tag = author_tag.select_one("name")
            if author_name_tag:
                author["string_name"] = author_name_tag.text
            corresponding = author_tag.get("corresponding")
            if corresponding == "1":
                author["corresponding"] = True
            email_tag = author_tag.select_one("email")
            if email_tag:
                author["email"] = email_tag.text
            xarticle.contributors.append(author)

    abstract_tag = article_tag.select_one("abstract")
    if abstract_tag:
        # Bug fix: the attribute was previously read as "langauge" (typo), so
        # the language declared in the XML was always ignored.
        abstract_language = abstract_tag.get("language", None)
        if abstract_language is None or isinstance(abstract_language, list):
            abstract_language = "eng"
        xarticle.abstracts.append(
            create_abstract(
                value_tex=abstract_tag.text,
                lang=abstract_language or self.detect_language(abstract_tag.text) or "und",
            )
        )

    keywords_tag = article_tag.select_one("keywords")
    if keywords_tag:
        keywords_language = keywords_tag.get("language", "eng")
        if keywords_language is None or isinstance(keywords_language, list):
            keywords_language = "eng"
        # NOTE(review): keywords_language is parsed but the subjects below are
        # hard-coded to "en"; kept as-is to preserve output — confirm intent.
        for kwd_tag in keywords_tag.select("keyword"):
            subject = create_subj()
            subject["value"] = kwd_tag.text
            subject["lang"] = "en"
            xarticle.kwds.append(subject)

    msc_tag = article_tag.select_one("MSCs")
    if msc_tag:
        for msc_subj in msc_tag.select("MSC"):
            subject = create_subj()
            subject["value"] = msc_subj.text
            subject["type"] = "msc"
            subject["lang"] = "en"
            xarticle.kwds.append(subject)

    pdf_location_tag = article_tag.select_one("filelocation")
    pdf_name_tag = article_tag.select_one("file")
    if pdf_location_tag and pdf_name_tag:
        pdf_url = "https://sites.dmi.uns.ac.rs/nsjom/" + pdf_location_tag.text + pdf_name_tag.text
        add_pdf_link_to_xarticle(xarticle, pdf_url)

    volume_tag = article_tag.select_one("volume")
    issue_tag = article_tag.select_one("issue")
    if volume_tag is None or issue_tag is None:
        raise ValueError(
            f"[{source_domain}] {xarticle.doi} Cannot parse volume or issue from article"
        )

    # Citations ?
    return xarticle, volume_tag.text, issue_tag.text