Mirror of https://github.com/Garmelon/PFERD.git, synced 2023-12-21 10:23:01 +01:00

Compare commits (9 commits):

- 259cfc20cc
- 37b51a66d8
- f47d2f11d8
- 1b6be6bd79
- e1430e6298
- 5fdd40204b
- fb4631ba18
- d72fc2760b
- 4a51aaa4f5
CHANGELOG.md (13 lines changed)

```diff
@@ -22,6 +22,19 @@ ambiguous situations.
 
 ## Unreleased
+
+## 3.4.2 - 2022-10-26
+
+### Added
+- Recognize and crawl content pages in cards
+- Recognize and ignore surveys
+
+### Fixed
+- Forum crawling crashing when a thread has no messages at all
+- Forum crawling crashing when a forum has no threads at all
+- Ilias login failing in some cases
+- Crawling of paginated future meetings
+- IPD crawler handling of URLs without trailing slash
 
 ## 3.4.1 - 2022-08-17
 
 ### Added
```
```diff
@@ -24,6 +24,7 @@ class IliasElementType(Enum):
     LINK = "link"
     BOOKING = "booking"
     MEETING = "meeting"
+    SURVEY = "survey"
     VIDEO = "video"
     VIDEO_PLAYER = "video_player"
     VIDEO_FOLDER = "video_folder"
```
```diff
@@ -59,6 +60,7 @@ class IliasPageElement:
 class IliasDownloadForumData:
     url: str
     form_data: Dict[str, Union[str, List[str]]]
+    empty: bool
 
 
 @dataclass
```
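The new `empty` flag lets the crawler decide up front whether downloading the forum export is worthwhile. A minimal sketch of the dataclass shape and a hypothetical consumer (the `crawl_forum` helper below is illustrative, not the real crawler code):

```python
from dataclasses import dataclass
from typing import Dict, List, Union


@dataclass
class IliasDownloadForumData:
    url: str
    form_data: Dict[str, Union[str, List[str]]]
    empty: bool  # True when the export form lists no thread ids


def crawl_forum(data: IliasDownloadForumData) -> List[str]:
    # Hypothetical consumer: skip the export request entirely for empty forums.
    if data.empty:
        return []
    # ... POST data.form_data to data.url and parse the export here ...
    return ["<thread html>"]


print(crawl_forum(IliasDownloadForumData("https://example.invalid/export", {}, empty=True)))
```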
```diff
@@ -130,24 +132,32 @@ class IliasPage:
             return None
         post_url = self._abs_url_from_relative(form["action"])
+
+        thread_ids = [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})]
+
         form_data: Dict[str, Union[str, List[str]]] = {
-            "thread_ids[]": [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})],
+            "thread_ids[]": thread_ids,
             "selected_cmd2": "html",
             "select_cmd2": "Ausführen",
             "selected_cmd": "",
         }
 
-        return IliasDownloadForumData(post_url, form_data)
+        return IliasDownloadForumData(url=post_url, form_data=form_data, empty=len(thread_ids) == 0)
 
     def get_next_stage_element(self) -> Optional[IliasPageElement]:
         if self._is_forum_page():
             if "trows=800" in self._page_url:
                 return None
+            log.explain("Requesting *all* forum threads")
             return self._get_show_max_forum_entries_per_page_url()
         if self._is_ilias_opencast_embedding():
+            log.explain("Unwrapping opencast embedding")
             return self.get_child_elements()[0]
         if self._page_type == IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED:
+            log.explain("Unwrapping video pagination")
             return self._find_video_entries_paginated()[0]
+        if self._contains_collapsed_future_meetings():
+            log.explain("Requesting *all* future meetings")
+            return self._uncollapse_future_meetings_url()
         return None
 
     def _is_forum_page(self) -> bool:
```
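The refactor extracts the `thread_ids[]` values once and reuses them both for the export form payload and for the new emptiness check. A standalone sketch of that BeautifulSoup pattern on made-up form markup (not real ILIAS HTML):

```python
from bs4 import BeautifulSoup

html = """
<form action="/ilias.php?cmd=export">
  <input type="checkbox" name="thread_ids[]" value="101">
  <input type="checkbox" name="thread_ids[]" value="102">
</form>
"""

form = BeautifulSoup(html, "html.parser").find("form")
# Collect all inputs that carry a thread id, regardless of tag type.
thread_ids = [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})]

form_data = {
    "thread_ids[]": thread_ids,
    "selected_cmd2": "html",
}
print(form_data, "empty:", len(thread_ids) == 0)
```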
```diff
@@ -200,6 +210,16 @@ class IliasPage:
             return False
         return "target=copa_" in link.get("value")
 
+    def _contains_collapsed_future_meetings(self) -> bool:
+        return self._uncollapse_future_meetings_url() is not None
+
+    def _uncollapse_future_meetings_url(self) -> Optional[IliasPageElement]:
+        element = self._soup.find("a", attrs={"href": lambda x: x and "crs_next_sess=1" in x})
+        if not element:
+            return None
+        link = self._abs_url_from_link(element)
+        return IliasPageElement(IliasElementType.FOLDER, link, "show all meetings")
+
     def _player_to_video(self) -> List[IliasPageElement]:
         # Fetch the actual video page. This is a small wrapper page initializing a javscript
         # player. Sadly we can not execute that JS. The actual video stream url is nowhere
```
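`_uncollapse_future_meetings_url` relies on BeautifulSoup's callable attribute filters to spot the "show all sessions" link. A self-contained sketch of that lookup, with invented markup:

```python
from bs4 import BeautifulSoup

html = '<div><a href="/goto.php?target=crs_42&crs_next_sess=1">Show all sessions</a></div>'
soup = BeautifulSoup(html, "html.parser")

# The lambda must tolerate tags without an href attribute, hence the "x and" guard.
element = soup.find("a", attrs={"href": lambda x: x and "crs_next_sess=1" in x})
print(element["href"] if element else "no expansion link on this page")
```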
```diff
@@ -711,6 +731,10 @@ class IliasPage:
             return IliasElementType.TEST
         if "fold" in icon["class"]:
             return IliasElementType.FOLDER
+        if "copa" in icon["class"]:
+            return IliasElementType.FOLDER
+        if "svy" in icon["class"]:
+            return IliasElementType.SURVEY
 
         _unexpected_html_warning()
         log.warn_contd(f"Could not extract type from {icon} for card title {card_title}")
```
```diff
@@ -790,6 +814,10 @@ class IliasPage:
         if img_tag is None:
             img_tag = found_parent.select_one("img.icon")
 
+        if img_tag is None and found_parent.find("a", attrs={"href": lambda x: x and "crs_next_sess=" in x}):
+            log.explain("Found session expansion button, skipping it as it has no content")
+            return None
+
         if img_tag is None:
             _unexpected_html_warning()
             log.warn_contd(f"Tried to figure out element type, but did not find an image for {url}")
```
```diff
@@ -937,6 +965,13 @@ def parse_ilias_forum_export(forum_export: BeautifulSoup) -> List[IliasForumThre
     for p in forum_export.select("body > p"):
         title_tag = p
         content_tag = p.find_next_sibling("ul")
+
+        if not content_tag:
+            # ILIAS allows users to delete the initial post while keeping the thread open
+            # This produces empty threads without *any* content.
+            # I am not sure why you would want this, but ILIAS makes it easy to do.
+            continue
+
         title = p.find("b").text
         if ":" in title:
             title = title[title.find(":") + 1:]
```
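The forum export pairs each thread title paragraph with the `<ul>` of posts that follows it; threads whose initial post was deleted have no such sibling and are now skipped instead of crashing the parser. A reduced sketch of that pairing, on invented export markup:

```python
from bs4 import BeautifulSoup

export = BeautifulSoup(
    "<body>"
    "<p><b>Forum: First thread</b></p><ul><li>post 1</li></ul>"
    "<p><b>Forum: Deleted thread</b></p>"  # no <ul> follows, so this thread is skipped
    "</body>",
    "html.parser",
)

for p in export.select("body > p"):
    content_tag = p.find_next_sibling("ul")
    if not content_tag:
        continue  # empty thread, nothing to download
    title = p.find("b").text
    if ":" in title:
        title = title[title.find(":") + 1:]
    print(title.strip(), "->", content_tag.get_text(strip=True))
```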
```diff
@@ -23,6 +23,12 @@ from .kit_ilias_html import (IliasElementType, IliasForumThread, IliasPage, Ilia
 
 TargetType = Union[str, int]
 
+_ILIAS_URL = "https://ilias.studium.kit.edu"
+
+
+class KitShibbolethBackgroundLoginSuccessful():
+    pass
+
 
 class KitIliasWebCrawlerSection(HttpCrawlerSection):
     def target(self) -> TargetType:
```
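The module-level constant replaces the hard-coded ILIAS base URL scattered through the file, and the empty `KitShibbolethBackgroundLoginSuccessful` class acts as a sentinel return type: callers distinguish "already logged in" from "here is the next login page" via `isinstance`. A minimal sketch of that pattern with a stand-in `fetch_login_page` (hypothetical name, not the real crawler code):

```python
from typing import Union

_ILIAS_URL = "https://ilias.studium.kit.edu"


class BackgroundLoginSuccessful:
    """Sentinel: the request landed straight back in ILIAS, no login form needed."""


def fetch_login_page(location: str) -> Union[str, BackgroundLoginSuccessful]:
    # Hypothetical stand-in: return the sentinel instead of page HTML when the
    # redirect target is already the ILIAS instance itself.
    if location.startswith(_ILIAS_URL):
        return BackgroundLoginSuccessful()
    return "<html>login form</html>"


page = fetch_login_page(f"{_ILIAS_URL}/dashboard")
if isinstance(page, BackgroundLoginSuccessful):
    print("already authenticated")
else:
    print("need to submit credentials")
```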
```diff
@@ -36,7 +42,7 @@ class KitIliasWebCrawlerSection(HttpCrawlerSection):
         if target == "desktop":
             # Full personal desktop
             return target
-        if target.startswith("https://ilias.studium.kit.edu"):
+        if target.startswith(_ILIAS_URL):
             # ILIAS URL
             return target
 
```
```diff
@@ -181,7 +187,7 @@ instance's greatest bottleneck.
             section.tfa_auth(authenticators),
         )
 
-        self._base_url = "https://ilias.studium.kit.edu"
+        self._base_url = _ILIAS_URL
 
         self._target = section.target()
         self._link_file_redirect_delay = section.link_redirect_delay()
```
```diff
@@ -228,19 +234,28 @@ instance's greatest bottleneck.
         async def gather_elements() -> None:
             elements.clear()
             async with cl:
-                soup = await self._get_page(url)
-
-                if expected_id is not None:
-                    perma_link_element: Tag = soup.find(id="current_perma_link")
-                    if not perma_link_element or "crs_" not in perma_link_element.get("value"):
-                        raise CrawlError("Invalid course id? Didn't find anything looking like a course")
+                next_stage_url: Optional[str] = url
+                current_parent = None
 
                 # Duplicated code, but the root page is special - we want to avoid fetching it twice!
-                log.explain_topic("Parsing root HTML page")
-                log.explain(f"URL: {url}")
-                page = IliasPage(soup, url, None)
-                elements.extend(page.get_child_elements())
+                while next_stage_url:
+                    soup = await self._get_page(next_stage_url)
+
+                    if current_parent is None and expected_id is not None:
+                        perma_link_element: Tag = soup.find(id="current_perma_link")
+                        if not perma_link_element or "crs_" not in perma_link_element.get("value"):
+                            raise CrawlError("Invalid course id? Didn't find anything looking like a course")
+
+                    log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
+                    log.explain(f"URL: {next_stage_url}")
+                    page = IliasPage(soup, next_stage_url, current_parent)
+                    if next_element := page.get_next_stage_element():
+                        current_parent = next_element
+                        next_stage_url = next_element.url
+                    else:
+                        next_stage_url = None
+
+                elements.extend(page.get_child_elements())
                 if description_string := page.get_description():
                     description.append(description_string)
```
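The root page is no longer parsed exactly once: the crawler now follows `get_next_stage_element()` in a loop, so multi-stage wrappers (forum pagination, opencast embeddings, collapsed future meetings) are unwrapped before child elements are collected. A stripped-down sketch of that traversal with stub pages (names and structure are simplified, not the real IliasPage):

```python
from typing import List, Optional


class StubPage:
    """Stand-in for IliasPage: may point at a follow-up page to fetch first."""

    def __init__(self, url: str, next_url: Optional[str], children: List[str]):
        self.url = url
        self.next_url = next_url
        self.children = children

    def get_next_stage_element(self) -> Optional[str]:
        return self.next_url


PAGES = {
    "course": StubPage("course", "forum?trows=800", []),
    "forum?trows=800": StubPage("forum?trows=800", None, ["thread 1", "thread 2"]),
}


def gather_elements(start_url: str) -> List[str]:
    next_stage_url: Optional[str] = start_url
    page = None
    while next_stage_url:
        page = PAGES[next_stage_url]  # in PFERD this is an HTTP fetch plus HTML parse
        next_stage_url = page.get_next_stage_element()
    return page.children              # only the final stage yields child elements


print(gather_elements("course"))
```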
```diff
@@ -362,9 +377,20 @@ instance's greatest bottleneck.
                 return None
             return await self._handle_forum(element, element_path)
         elif element.type == IliasElementType.TEST:
-            log.explain_topic(f"Decision: Crawl {fmt_path(element_path)}")
-            log.explain("Tests contain no relevant files")
-            log.explain("Answer: No")
+            log.status(
+                "[bold bright_black]",
+                "Ignored",
+                fmt_path(element_path),
+                "[bright_black](tests contain no relevant data)"
+            )
+            return None
+        elif element.type == IliasElementType.SURVEY:
+            log.status(
+                "[bold bright_black]",
+                "Ignored",
+                fmt_path(element_path),
+                "[bright_black](surveys contain no relevant data)"
+            )
             return None
         elif element.type == IliasElementType.LINK:
             return await self._handle_link(element, element_path)
```
```diff
@@ -658,7 +684,7 @@ instance's greatest bottleneck.
     @_iorepeat(3, "crawling forum")
     @anoncritical
     async def _crawl_forum(self, element: IliasPageElement, cl: CrawlToken) -> None:
-        elements = []
+        elements: List[IliasForumThread] = []
 
         async with cl:
             next_stage_url = element.url
```
```diff
@@ -677,6 +703,10 @@ instance's greatest bottleneck.
             download_data = page.get_download_forum_data()
             if not download_data:
                 raise CrawlWarning("Failed to extract forum data")
+            if download_data.empty:
+                log.explain("Forum had no threads")
+                elements = []
+                return
             html = await self._post_authenticated(download_data.url, download_data.form_data)
             elements = parse_ilias_forum_export(soupify(html))
```
```diff
@@ -804,14 +834,17 @@ class KitShibbolethLogin:
 
         # Equivalent: Click on "Mit KIT-Account anmelden" button in
         # https://ilias.studium.kit.edu/login.php
-        url = "https://ilias.studium.kit.edu/shib_login.php"
+        url = f"{_ILIAS_URL}/shib_login.php"
         data = {
             "sendLogin": "1",
             "idp_selection": "https://idp.scc.kit.edu/idp/shibboleth",
             "il_target": "",
             "home_organization_selection": "Weiter",
         }
-        soup: BeautifulSoup = await _shib_post(sess, url, data)
+        soup: Union[BeautifulSoup, KitShibbolethBackgroundLoginSuccessful] = await _shib_post(sess, url, data)
+
+        if isinstance(soup, KitShibbolethBackgroundLoginSuccessful):
+            return
 
         # Attempt to login using credentials, if necessary
         while not self._login_successful(soup):
```
```diff
@@ -850,7 +883,7 @@ class KitShibbolethLogin:
         # (or clicking "Continue" if you have JS disabled)
         relay_state = soup.find("input", {"name": "RelayState"})
         saml_response = soup.find("input", {"name": "SAMLResponse"})
-        url = "https://ilias.studium.kit.edu/Shibboleth.sso/SAML2/POST"
+        url = f"{_ILIAS_URL}/Shibboleth.sso/SAML2/POST"
         data = {  # using the info obtained in the while loop above
             "RelayState": relay_state["value"],
             "SAMLResponse": saml_response["value"],
```
```diff
@@ -899,22 +932,35 @@ async def _post(session: aiohttp.ClientSession, url: str, data: Any) -> Beautifu
     return soupify(await response.read())
 
 
-async def _shib_post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
+async def _shib_post(
+    session: aiohttp.ClientSession,
+    url: str,
+    data: Any
+) -> Union[BeautifulSoup, KitShibbolethBackgroundLoginSuccessful]:
     """
     aiohttp unescapes '/' and ':' in URL query parameters which is not RFC compliant and rejected
     by Shibboleth. Thanks a lot. So now we unroll the requests manually, parse location headers and
     build encoded URL objects ourselves... Who thought mangling location header was a good idea??
     """
+    log.explain_topic("Shib login POST")
     async with session.post(url, data=data, allow_redirects=False) as response:
         location = response.headers.get("location")
+        log.explain(f"Got location {location!r}")
         if not location:
             raise CrawlWarning(f"Login failed (1), no location header present at {url}")
         correct_url = yarl.URL(location, encoded=True)
+        log.explain(f"Corrected location to {correct_url!r}")
+
+        if str(correct_url).startswith(_ILIAS_URL):
+            log.explain("ILIAS recognized our shib token and logged us in in the background, returning")
+            return KitShibbolethBackgroundLoginSuccessful()
+
         async with session.get(correct_url, allow_redirects=False) as response:
             location = response.headers.get("location")
+            log.explain(f"Redirected to {location!r} with status {response.status}")
             # If shib still still has a valid session, it will directly respond to the request
             if location is None:
+                log.explain("Shib recognized us, returning its response directly")
                 return soupify(await response.read())
 
             as_yarl = yarl.URL(response.url)
```
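The docstring explains why redirects are followed by hand: aiohttp re-decodes '/' and ':' in query strings, which Shibboleth rejects, so the Location header is wrapped in `yarl.URL(..., encoded=True)` untouched. A hedged sketch of that manual hop plus the new background-login short-circuit (assumes an existing `aiohttp.ClientSession`; names are simplified and error handling is trimmed):

```python
from typing import Optional, Union

import aiohttp
import yarl
from bs4 import BeautifulSoup

_ILIAS_URL = "https://ilias.studium.kit.edu"


class BackgroundLoginSuccessful:
    pass


async def shib_post_sketch(
    session: aiohttp.ClientSession, url: str, data: dict
) -> Union[BeautifulSoup, BackgroundLoginSuccessful]:
    async with session.post(url, data=data, allow_redirects=False) as response:
        location: Optional[str] = response.headers.get("location")
        if not location:
            raise RuntimeError("no location header")
        # encoded=True keeps the percent-escapes exactly as Shibboleth sent them
        correct_url = yarl.URL(location, encoded=True)

        if str(correct_url).startswith(_ILIAS_URL):
            # Shibboleth sent us straight back to ILIAS: the session is already valid.
            return BackgroundLoginSuccessful()

        async with session.get(correct_url, allow_redirects=False) as follow_up:
            return BeautifulSoup(await follow_up.read(), "html.parser")
```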
```diff
@@ -928,6 +974,7 @@ async def _shib_post(session: aiohttp.ClientSession, url: str, data: Any) -> Bea
                 path=location,
                 encoded=True
             )
+            log.explain(f"Corrected location to {correct_url!r}")
 
             async with session.get(correct_url, allow_redirects=False) as response:
                 return soupify(await response.read())
```
```diff
@@ -24,6 +24,9 @@ class KitIpdCrawlerSection(HttpCrawlerSection):
         if not target.startswith("https://"):
             self.invalid_value("target", target, "Should be a URL")
 
+        if not target.endswith("/"):
+            target = target + "/"
+
         return target
 
     def link_regex(self) -> Pattern[str]:
```
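The IPD crawler fix simply normalizes the configured target to end with "/". The likely motivation (an assumption, not stated in the diff) is relative-link resolution: without the trailing slash the last path segment is treated as a file and dropped when links are joined against it. A quick illustration with the standard library:

```python
from urllib.parse import urljoin

# Without a trailing slash the final path segment gets replaced...
print(urljoin("https://example.com/course/page", "slides.pdf"))   # .../course/slides.pdf
# ...with it, links resolve inside the directory as intended.
print(urljoin("https://example.com/course/page/", "slides.pdf"))  # .../course/page/slides.pdf

target = "https://example.com/course/page"
if not target.endswith("/"):
    target = target + "/"
print(target)
```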
```diff
@@ -1,2 +1,2 @@
 NAME = "PFERD"
-VERSION = "3.4.1"
+VERSION = "3.4.2"
```