Coverage for src/crawler/by_source/aulfm_crawler.py: 85%

67 statements  

coverage.py v7.9.0, created at 2025-07-30 09:47 +0000

1from urllib.parse import urljoin 

2 

3from bs4 import BeautifulSoup, Tag 

4from ptf.model_data import ArticleData, create_abstract, create_articledata, create_subj 

5 

6from crawler.base_crawler import BaseCollectionCrawler 

7from crawler.utils import add_pdf_link_to_xarticle, cleanup_str, regex_to_dict 

8 

9 

10class AulfmCrawler(BaseCollectionCrawler): 

11 source_name = "University of Lodz Repository" 

12 source_domain = "AULFM" 

13 source_website = "https://dspace.uni.lodz.pl/xmlui/" 

14 
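# issue_re extracts the volume and year from an issue's listing text; pages_re matches page ranges of the form "Pages: first–last"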

15 issue_re = r".+ vol. (?P<volume>\d+)\/(?P<year>\d+)" 

16 pages_re = r"Pages: (?P<fpage>\d+)–(?P<lpage>\d+)" 

17 

18 def parse_collection_content(self, content): 
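"parse the list of issues from the collection page"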

19 xissues = [] 

20 soup = BeautifulSoup(content, "html.parser") 

21 issues = soup.select("h4.artifact-title a span.Z3988") 
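# issues are listed as h4.artifact-title links; the nested span.Z3988 (COinS) element's text carries the "... vol. <volume>/<year>" string matched by issue_re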

22 for issue in issues: 

23 issue_dict = regex_to_dict( 

24 self.issue_re, issue.text, error_msg="Couldn't parse issue data" 

25 ) 

26 

27 parent = issue.parent.parent 

28 a_tag = issue.parent 

29 issue_href = a_tag.get("href") 

30 if not isinstance(issue_href, str):  30 ↛ 31: line 30 didn't jump to line 31 because the condition on line 30 was never true

31 raise ValueError("Couldn't parse issue url") 

32 
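# remove the link so that the parent's remaining text is just the bracketed article count; issues with "0" articles are skipped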

33 a_tag.decompose() 

34 article_count = cleanup_str(parent.text).removeprefix("[").removesuffix("]") 

35 if article_count == "0": 

36 continue 

37 

38 xissues.append( 

39 self.create_xissue( 

40 urljoin(self.collection_url, issue_href), 

41 issue_dict["year"], 

42 issue_dict["volume"], 

43 None, 

44 ) 

45 ) 

46 return xissues 

47 

48 def parse_issue_content(self, content, xissue): 

49 "parse the article list" 

50 soup = BeautifulSoup(content, "html.parser") 

51 articles = soup.select("h4.artifact-title a") 
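# every article of the issue is listed as an h4.artifact-title link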

52 for index, article_tag in enumerate(articles): 

53 article_url = article_tag.get("href") 

54 if not isinstance(article_url, str):  54 ↛ 55: line 54 didn't jump to line 55 because the condition on line 54 was never true

55 raise ValueError("Couldn't parse article data") 
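# build a minimal article stub: the pid is positional ("a0", "a1", ...) and the full metadata is filled in later by parse_article_content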

56 xarticle = create_articledata() 

57 xarticle.pid = "a" + str(index) 

58 xarticle.url = urljoin(self.collection_url, article_url) 

59 xissue.articles.append(xarticle) 

60 

61 def parse_article_content(self, content, xissue, xarticle, url): 

62 "parse the article content" 

63 soup = BeautifulSoup(content, "html.parser") 
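# title, publisher, language and authors are taken from the page's citation meta tags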

64 self.get_metadata_using_citation_meta( 

65 xarticle, xissue, soup, ["title", "publisher", "lang", "author"] 

66 ) 

67 
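# the full-text PDF is linked from the item page as a DSpace bitstream handle URL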

68 pdf_link_tag = soup.select( 

69 ".item-page-field-wrapper > div > a[href^='/xmlui/bitstream/handle']" 

70 ) 

71 if len(pdf_link_tag) != 1:  71 ↛ 72: line 71 didn't jump to line 72 because the condition on line 71 was never true

72 raise ValueError("Error while parsing pdf url: expected exactly one <a> candidate")

73 pdf_link = pdf_link_tag[0].get("href") 

74 if not isinstance(pdf_link, str):  74 ↛ 75: line 74 didn't jump to line 75 because the condition on line 74 was never true

75 raise ValueError("Couldn't parse article pdf") 

76 add_pdf_link_to_xarticle(xarticle, urljoin(url, pdf_link)) 

77 

78 self.get_metadata_using_dcterms(soup, xarticle, ("abstract", "keywords")) 

79 

80 return xarticle 

81 

82 def get_metadata_using_dcterms(self, soup: Tag, xarticle: ArticleData, what): 
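"parse abstract and keywords from the DCTERMS.abstract and DC.subject meta tags"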

83 if "abstract" in what:  83 ↛ 93: line 83 didn't jump to line 93 because the condition on line 83 was always true

84 abstract_tag = soup.select_one("meta[name='DCTERMS.abstract']") 

85 if abstract_tag:  85 ↛ 93: line 85 didn't jump to line 93 because the condition on line 85 was always true

86 abstract_text = abstract_tag.get("content") 

87 if isinstance(abstract_text, str):  87 ↛ 93: line 87 didn't jump to line 93 because the condition on line 87 was always true

88 xabstract = create_abstract( 

89 lang="en", tag="abstract", value_tex=cleanup_str(abstract_text) 

90 ) 

91 xarticle.abstracts.append(xabstract) 

92 

93 if "keywords" in what:  93 ↛ exit: line 93 didn't return from function 'get_metadata_using_dcterms' because the condition on line 93 was always true

94 keyword_tags = soup.select("meta[name='DC.subject']") 

95 for tag in keyword_tags: 

96 kwd_text = tag.get("content") 

97 if not isinstance(kwd_text, str) or len(kwd_text) == 0:  97 ↛ 98: line 97 didn't jump to line 98 because the condition on line 97 was never true

98 continue 

99 kwd = create_subj(value=kwd_text) 

100 xarticle.kwds.append(kwd)