Compare commits

...

5 Commits

6 changed files with 154 additions and 19 deletions


@@ -22,6 +22,10 @@ ambiguous situations.
 ## Unreleased
+### Fixed
+- Forum crawling crashing when parsing empty (= 0 messages) threads
+- Forum crawling crashing when a forum has no threads at all

 ## 3.4.1 - 2022-08-17
 ### Added


@@ -5,6 +5,8 @@ import os
 import sys
 from pathlib import Path

+from PFERD.update import check_for_updates
+
 from .auth import AuthLoadError
 from .cli import PARSER, ParserLoadError, load_default_section
 from .config import Config, ConfigDumpError, ConfigLoadError, ConfigOptionError

@@ -134,6 +136,11 @@ def main() -> None:
             loop.run_until_complete(asyncio.sleep(1))
             loop.close()
         else:
+            log.explain_topic("Checking for updates")
+            if not args.skip_update_check:
+                asyncio.run(check_for_updates())
+            else:
+                log.explain("Update check skipped due to configuration option")
             asyncio.run(pferd.run(args.debug_transforms))
     except (ConfigOptionError, AuthLoadError) as e:
         log.unlock()


@@ -151,6 +151,11 @@ PARSER.add_argument(
     action="version",
     version=f"{NAME} {VERSION} (https://github.com/Garmelon/PFERD)",
 )
+PARSER.add_argument(
+    "--skip-update-check",
+    action="store_true",
+    help="disable automatic update checks at startup"
+)
 PARSER.add_argument(
     "--config", "-c",
     type=Path,
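The new flag is a plain argparse store_true option that __main__.py reads above. A minimal sketch with a throwaway parser (standing in for PFERD's actual PARSER, which defines many more options and subcommands) shows what args.skip_update_check ends up as:

```python
import argparse

# Throwaway parser standing in for PFERD's PARSER, only for illustration.
parser = argparse.ArgumentParser(prog="pferd")
parser.add_argument(
    "--skip-update-check",
    action="store_true",
    help="disable automatic update checks at startup",
)

print(parser.parse_args([]).skip_update_check)                        # False -> check runs at startup
print(parser.parse_args(["--skip-update-check"]).skip_update_check)   # True  -> check is skipped
```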


@@ -59,6 +59,7 @@ class IliasPageElement:
 class IliasDownloadForumData:
     url: str
     form_data: Dict[str, Union[str, List[str]]]
+    empty: bool

 @dataclass
@@ -130,24 +131,32 @@ class IliasPage:
             return None

         post_url = self._abs_url_from_relative(form["action"])
+        thread_ids = [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})]
+
         form_data: Dict[str, Union[str, List[str]]] = {
-            "thread_ids[]": [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})],
+            "thread_ids[]": thread_ids,
             "selected_cmd2": "html",
             "select_cmd2": "Ausführen",
             "selected_cmd": "",
         }

-        return IliasDownloadForumData(post_url, form_data)
+        return IliasDownloadForumData(url=post_url, form_data=form_data, empty=len(thread_ids) == 0)

     def get_next_stage_element(self) -> Optional[IliasPageElement]:
         if self._is_forum_page():
             if "trows=800" in self._page_url:
                 return None
+            log.explain("Requesting *all* forum threads")
             return self._get_show_max_forum_entries_per_page_url()
         if self._is_ilias_opencast_embedding():
+            log.explain("Unwrapping opencast embedding")
             return self.get_child_elements()[0]
         if self._page_type == IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED:
+            log.explain("Unwrapping video pagination")
             return self._find_video_entries_paginated()[0]
+        if self._contains_collapsed_future_meetings():
+            log.explain("Requesting *all* future meetings")
+            return self._uncollapse_future_meetings_url()
         return None

     def _is_forum_page(self) -> bool:
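The new empty flag is derived from the number of hidden thread_ids[] inputs in the export form. A self-contained BeautifulSoup sketch (with made-up form HTML, not a real ILIAS page) shows the same lookup:

```python
from bs4 import BeautifulSoup

# Made-up export form: two threads are selected for export.
html = """
<form action="/export">
  <input type="hidden" name="thread_ids[]" value="1234"/>
  <input type="hidden" name="thread_ids[]" value="5678"/>
</form>
"""
form = BeautifulSoup(html, "html.parser").find("form")

# Same lookup the crawler performs: every element named "thread_ids[]".
thread_ids = [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})]

print(thread_ids)            # ['1234', '5678']
print(len(thread_ids) == 0)  # False -> IliasDownloadForumData(..., empty=False)
```

A forum without any threads yields an empty list here, which is exactly what the empty flag records for the crawler.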
@@ -200,6 +209,16 @@ class IliasPage:
             return False
         return "target=copa_" in link.get("value")

+    def _contains_collapsed_future_meetings(self) -> bool:
+        return self._uncollapse_future_meetings_url() is not None
+
+    def _uncollapse_future_meetings_url(self) -> Optional[IliasPageElement]:
+        element = self._soup.find("a", attrs={"href": lambda x: x and "crs_next_sess=1" in x})
+        if not element:
+            return None
+        link = self._abs_url_from_link(element)
+        return IliasPageElement(IliasElementType.FOLDER, link, "show all meetings")
+
     def _player_to_video(self) -> List[IliasPageElement]:
         # Fetch the actual video page. This is a small wrapper page initializing a javascript
         # player. Sadly we can not execute that JS. The actual video stream url is nowhere
@@ -790,6 +809,10 @@ class IliasPage:
         if img_tag is None:
             img_tag = found_parent.select_one("img.icon")

+        if img_tag is None and found_parent.find("a", attrs={"href": lambda x: x and "crs_next_sess=" in x}):
+            log.explain("Found session expansion button, skipping it as it has no content")
+            return None
+
         if img_tag is None:
             _unexpected_html_warning()
             log.warn_contd(f"Tried to figure out element type, but did not find an image for {url}")
@@ -937,6 +960,13 @@ def parse_ilias_forum_export(forum_export: BeautifulSoup) -> List[IliasForumThread]:
     for p in forum_export.select("body > p"):
         title_tag = p
         content_tag = p.find_next_sibling("ul")
+
+        if not content_tag:
+            # ILIAS allows users to delete the initial post while keeping the thread open.
+            # This produces empty threads without *any* content.
+            # I am not sure why you would want this, but ILIAS makes it easy to do.
+            continue
+
         title = p.find("b").text
         if ":" in title:
             title = title[title.find(":") + 1:]
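To see why the guard is needed: in the HTML export each thread is a paragraph title followed by a list of its posts, and a thread whose initial post was deleted has no such list, so find_next_sibling("ul") returns None. A small sketch with invented export markup:

```python
from bs4 import BeautifulSoup

# Invented, simplified export: thread A still has posts,
# thread B lost its initial post and has no <ul> behind it.
export = BeautifulSoup(
    "<body>"
    "<p><b>Thema: Thread A</b></p><ul><li>first post</li></ul>"
    "<p><b>Thema: Thread B</b></p>"
    "</body>",
    "html.parser",
)

for p in export.select("body > p"):
    content_tag = p.find_next_sibling("ul")
    if not content_tag:
        # This is the case the new guard skips over.
        print("skipping empty thread:", p.find("b").text)
        continue
    print("parsing thread:", p.find("b").text)
```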


@@ -23,6 +23,12 @@ from .kit_ilias_html import (IliasElementType, IliasForumThread, IliasPage, Ilia
 TargetType = Union[str, int]

+_ILIAS_URL = "https://ilias.studium.kit.edu"
+
+
+class KitShibbolethBackgroundLoginSuccessful():
+    pass
+

 class KitIliasWebCrawlerSection(HttpCrawlerSection):
     def target(self) -> TargetType:
@@ -36,7 +42,7 @@ class KitIliasWebCrawlerSection(HttpCrawlerSection):
         if target == "desktop":
             # Full personal desktop
             return target
-        if target.startswith("https://ilias.studium.kit.edu"):
+        if target.startswith(_ILIAS_URL):
             # ILIAS URL
             return target
@@ -181,7 +187,7 @@ instance's greatest bottleneck.
             section.tfa_auth(authenticators),
         )

-        self._base_url = "https://ilias.studium.kit.edu"
+        self._base_url = _ILIAS_URL
         self._target = section.target()
         self._link_file_redirect_delay = section.link_redirect_delay()
@@ -228,19 +234,28 @@ instance's greatest bottleneck.
         async def gather_elements() -> None:
             elements.clear()
             async with cl:
-                soup = await self._get_page(url)
-
-                if expected_id is not None:
-                    perma_link_element: Tag = soup.find(id="current_perma_link")
-                    if not perma_link_element or "crs_" not in perma_link_element.get("value"):
-                        raise CrawlError("Invalid course id? Didn't find anything looking like a course")
-
+                next_stage_url: Optional[str] = url
+                current_parent = None
+
                 # Duplicated code, but the root page is special - we want to avoid fetching it twice!
-                log.explain_topic("Parsing root HTML page")
-                log.explain(f"URL: {url}")
-                page = IliasPage(soup, url, None)
-                elements.extend(page.get_child_elements())
+                while next_stage_url:
+                    soup = await self._get_page(next_stage_url)
+
+                    if current_parent is None and expected_id is not None:
+                        perma_link_element: Tag = soup.find(id="current_perma_link")
+                        if not perma_link_element or "crs_" not in perma_link_element.get("value"):
+                            raise CrawlError("Invalid course id? Didn't find anything looking like a course")
+
+                    log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
+                    log.explain(f"URL: {next_stage_url}")
+                    page = IliasPage(soup, next_stage_url, current_parent)
+                    if next_element := page.get_next_stage_element():
+                        current_parent = next_element
+                        next_stage_url = next_element.url
+                    else:
+                        next_stage_url = None
+
+                    elements.extend(page.get_child_elements())

                 if description_string := page.get_description():
                     description.append(description_string)
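The rewritten gather_elements now follows "next stage" pages (forum expansion, opencast wrappers, paginated video folders, collapsed future meetings) in a loop instead of parsing only the root page. A stripped-down, runnable sketch of that control flow, using hypothetical FakePage/FakeElement stand-ins rather than the real IliasPage API:

```python
import asyncio
from dataclasses import dataclass
from typing import Dict, List, Optional

# Hypothetical stand-ins for IliasPageElement / IliasPage, only to show the loop shape.
@dataclass
class FakeElement:
    url: str

class FakePage:
    def __init__(self, next_url: Optional[str], children: List[str]):
        self._next_url = next_url
        self._children = children

    def get_next_stage_element(self) -> Optional[FakeElement]:
        return FakeElement(self._next_url) if self._next_url else None

    def get_child_elements(self) -> List[str]:
        return self._children

# Two-stage chain: the first page only points at its "expanded" variant.
PAGES: Dict[str, FakePage] = {
    "/forum": FakePage("/forum?trows=800", []),
    "/forum?trows=800": FakePage(None, ["thread 1", "thread 2"]),
}

async def gather(url: str) -> List[str]:
    elements: List[str] = []
    next_stage_url: Optional[str] = url
    while next_stage_url:
        page = PAGES[next_stage_url]           # stand-in for: soup = await self._get_page(...)
        if next_element := page.get_next_stage_element():
            next_stage_url = next_element.url  # follow the chain to the next stage
        else:
            next_stage_url = None              # final content page reached
        elements.extend(page.get_child_elements())
    return elements

print(asyncio.run(gather("/forum")))           # ['thread 1', 'thread 2']
```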
@@ -658,7 +673,7 @@ instance's greatest bottleneck.
     @_iorepeat(3, "crawling forum")
     @anoncritical
     async def _crawl_forum(self, element: IliasPageElement, cl: CrawlToken) -> None:
-        elements = []
+        elements: List[IliasForumThread] = []

         async with cl:
             next_stage_url = element.url
@@ -677,6 +692,10 @@ instance's greatest bottleneck.
             download_data = page.get_download_forum_data()
             if not download_data:
                 raise CrawlWarning("Failed to extract forum data")
+            if download_data.empty:
+                log.explain("Forum had no threads")
+                elements = []
+                return
             html = await self._post_authenticated(download_data.url, download_data.form_data)
             elements = parse_ilias_forum_export(soupify(html))
@@ -804,14 +823,17 @@ class KitShibbolethLogin:
         # Equivalent: Click on "Mit KIT-Account anmelden" button in
         # https://ilias.studium.kit.edu/login.php
-        url = "https://ilias.studium.kit.edu/shib_login.php"
+        url = f"{_ILIAS_URL}/shib_login.php"
         data = {
             "sendLogin": "1",
             "idp_selection": "https://idp.scc.kit.edu/idp/shibboleth",
             "il_target": "",
             "home_organization_selection": "Weiter",
         }
-        soup: BeautifulSoup = await _shib_post(sess, url, data)
+        soup: Union[BeautifulSoup, KitShibbolethBackgroundLoginSuccessful] = await _shib_post(sess, url, data)
+
+        if isinstance(soup, KitShibbolethBackgroundLoginSuccessful):
+            return

         # Attempt to login using credentials, if necessary
         while not self._login_successful(soup):
@@ -850,7 +872,7 @@ class KitShibbolethLogin:
         # (or clicking "Continue" if you have JS disabled)
         relay_state = soup.find("input", {"name": "RelayState"})
         saml_response = soup.find("input", {"name": "SAMLResponse"})
-        url = "https://ilias.studium.kit.edu/Shibboleth.sso/SAML2/POST"
+        url = f"{_ILIAS_URL}/Shibboleth.sso/SAML2/POST"
         data = {  # using the info obtained in the while loop above
             "RelayState": relay_state["value"],
             "SAMLResponse": saml_response["value"],
@@ -899,22 +921,35 @@ async def _post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
         return soupify(await response.read())

-async def _shib_post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
+async def _shib_post(
+    session: aiohttp.ClientSession,
+    url: str,
+    data: Any
+) -> Union[BeautifulSoup, KitShibbolethBackgroundLoginSuccessful]:
     """
     aiohttp unescapes '/' and ':' in URL query parameters which is not RFC compliant and rejected
     by Shibboleth. Thanks a lot. So now we unroll the requests manually, parse location headers and
     build encoded URL objects ourselves... Who thought mangling location headers was a good idea??
     """
+    log.explain_topic("Shib login POST")
     async with session.post(url, data=data, allow_redirects=False) as response:
         location = response.headers.get("location")
+        log.explain(f"Got location {location!r}")
         if not location:
             raise CrawlWarning(f"Login failed (1), no location header present at {url}")
         correct_url = yarl.URL(location, encoded=True)
+        log.explain(f"Corrected location to {correct_url!r}")
+
+        if str(correct_url).startswith(_ILIAS_URL):
+            log.explain("ILIAS recognized our shib token and logged us in in the background, returning")
+            return KitShibbolethBackgroundLoginSuccessful()

         async with session.get(correct_url, allow_redirects=False) as response:
             location = response.headers.get("location")
+            log.explain(f"Redirected to {location!r} with status {response.status}")

             # If shib still has a valid session, it will directly respond to the request
             if location is None:
+                log.explain("Shib recognized us, returning its response directly")
                 return soupify(await response.read())

             as_yarl = yarl.URL(response.url)

@@ -928,6 +963,7 @@ async def _shib_post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
                 path=location,
                 encoded=True
             )
+            log.explain(f"Corrected location to {correct_url!r}")

             async with session.get(correct_url, allow_redirects=False) as response:
                 return soupify(await response.read())

PFERD/update.py (new file)

@@ -0,0 +1,53 @@
+from dataclasses import dataclass
+import ssl
+from typing import Optional
+
+import aiohttp
+import certifi
+
+from .version import NAME, VERSION
+from .logging import log
+
+
+@dataclass
+class PferdUpdate:
+    release_url: str
+    version: str
+
+
+def _build_session() -> aiohttp.ClientSession:
+    return aiohttp.ClientSession(
+        headers={"User-Agent": f"{NAME}/{VERSION}"},
+        connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())),
+        timeout=aiohttp.ClientTimeout(
+            total=15 * 60,
+            connect=10,
+            sock_connect=10,
+            sock_read=10,
+        )
+    )
+
+
+async def check_for_updates() -> None:
+    if new_version := await get_newer_version():
+        log.warn(
+            f"{NAME} version out of date. "
+            + f"You are running version {VERSION!r} but {new_version.version!r} was found on GitHub."
+        )
+        log.warn_contd(f"You can download it on GitHub: {new_version.release_url}")
+    else:
+        log.explain("No update found")
+
+
+async def get_newer_version() -> Optional[PferdUpdate]:
+    async with _build_session() as session:
+        async with session.get(
+            "https://api.github.com/repos/Garmelon/Pferd/releases/latest",
+            headers={"Accept": "application/vnd.github+json"}
+        ) as response:
+            release_information = await response.json()
+            tag_name: str = release_information["tag_name"]
+            tag_name = tag_name.removeprefix("v")
+            if VERSION == tag_name:
+                return None
+            return PferdUpdate(release_url=release_information["html_url"], version=tag_name)
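As wired up in __main__.py above, this check runs once at startup via asyncio.run(check_for_updates()) unless --skip-update-check is passed. With the package importable, the same coroutine can also be exercised on its own:

```python
import asyncio

from PFERD.update import check_for_updates

# Performs one request against the GitHub releases API and logs a warning
# if the latest release tag differs from the installed VERSION.
asyncio.run(check_for_updates())
```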