Coverage for src/crawler/by_source/ams/ams_eraams_crawler.py: 33% (17 statements)
coverage.py v7.8.2, created at 2025-07-07 11:48 +0000
from urllib.parse import urljoin

from bs4 import BeautifulSoup
from ptf.model_data import create_articledata

from crawler.by_source.ams.ams_base_crawler import AmsCrawler


class Ams_eraamsCrawler(AmsCrawler):
    source_domain = "AMS_ERAAMS"

    def parse_issue_content(self, content, xissue):
        """Parse an issue page and append one article stub per abstract link."""
        soup = BeautifulSoup(content, "html.parser")
        # Each article on the issue page is a <dd> entry; the abstract link
        # is identified by its own text label.
        articles = soup.select(
            "dd > a:-soup-contains-own('Abstract, references and article information')"
        )
        for index, a in enumerate(articles):
            article_url = a.get("href")
            if not isinstance(article_url, str):
                raise ValueError("Couldn't parse article url")
            xarticle = create_articledata()
            # hrefs are relative, so resolve them against the collection URL.
            xarticle.url = urljoin(self.collection_url, article_url)
            xarticle.pid = "a" + str(index)
            xissue.articles.append(xarticle)
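
The selector relies on soupsieve's :-soup-contains-own pseudo-class (the CSS
backend BeautifulSoup uses for select()), which matches only when the element's
own text, not a descendant's, contains the given string; that is what singles
out the abstract link among the other per-article links. A minimal,
self-contained sketch of the same selection and URL-resolution steps, using
hypothetical markup and URLs that merely mimic an AMS issue listing:

from urllib.parse import urljoin

from bs4 import BeautifulSoup

# Hypothetical fragment of an issue page; the real AMS markup may differ.
html = """
<dl>
  <dd><a href="/journals/era/2024-30-01/S0001/">Abstract, references and article information</a></dd>
  <dd><a href="/journals/era/2024-30-01/S0001.pdf">Full-text PDF</a></dd>
</dl>
"""

soup = BeautifulSoup(html, "html.parser")
links = soup.select(
    "dd > a:-soup-contains-own('Abstract, references and article information')"
)
print([a.get("href") for a in links])
# ['/journals/era/2024-30-01/S0001/']  (the PDF link is filtered out)

# Relative hrefs resolve against a base URL, as parse_issue_content does
# with self.collection_url; the base shown here is an assumption.
print(urljoin("https://www.ams.org/journals/era/", links[0].get("href")))
# https://www.ams.org/journals/era/2024-30-01/S0001/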