Compare commits


1 commit

| Author | SHA1 | Message | Date |
|---|---|---|---|
| I-Al-Istannen | 4af02012bc | Strip long path prefix from file links in report | 2024-11-14 20:06:13 +01:00 |
50 changed files with 1346 additions and 3004 deletions

View File

@@ -1 +0,0 @@
-2cf0e060ed126537dd993896b6aa793e2a6b9e80

View File

@@ -14,17 +14,23 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest, macos-13, macos-latest]
-        python: ["3.11"]
+        python: ["3.9"]
     steps:
       - uses: actions/checkout@v4
-      - name: Install uv
-        uses: astral-sh/setup-uv@v7
+      - uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python }}
       - name: Set up project
-        run: uv sync
+        if: matrix.os != 'windows-latest'
+        run: ./scripts/setup
+      - name: Set up project on windows
+        if: matrix.os == 'windows-latest'
+        # For some reason, `pip install --upgrade pip` doesn't work on
+        # 'windows-latest'. The installed pip version works fine however.
+        run: ./scripts/setup --no-pip
       - name: Run checks
         run: |

View File

@@ -22,61 +22,6 @@ ambiguous situations.
 ## Unreleased
-### Added
-- Store the description when using the `internet-shortcut` link format
-- Support for basic auth with the kit-ipd crawler
-### Fixed
-- Event loop errors on Windows with Python 3.14
-- Sanitize `/` in headings in kit-ipd crawler
-- Crawl info tab again
-## 3.8.3 - 2025-07-01
-### Added
-- Support for link collections.
-  In "fancy" mode, a single HTML file with multiple links is generated.
-  In all other modes, PFERD creates a folder for the collection and a new file
-  for every link inside.
-### Fixed
-- Crawling of exercises with instructions
-- Don't download unavailable elements.
-  Elements that are unavailable (for example, because their availability is
-  time restricted) will not download the HTML for the info page anymore.
-- `base_url` argument for `ilias-web` crawler causing crashes
-## 3.8.2 - 2025-04-29
-### Changed
-- Explicitly mention that wikis are not supported at the moment and ignore them
-### Fixed
-- Ilias-native login
-- Exercise crawling
-## 3.8.1 - 2025-04-17
-### Fixed
-- Description html files now specify a UTF-8 encoding
-- Images in descriptions now always have a white background
-## 3.8.0 - 2025-04-16
-### Added
-- Support for ILIAS 9
-### Changed
-- Added prettier CSS to forum threads
-- Downloaded forum threads now link to the forum instead of the ILIAS thread
-- Increase minimum supported Python version to 3.11
-- Do not crawl nested courses (courses linked in other courses)
-### Fixed
-- File links in report on Windows
-- TOTP authentication in KIT Shibboleth
-- Forum crawling only considering the first 20 entries
 ## 3.7.0 - 2024-11-13
 ### Added

View File

@@ -153,7 +153,6 @@ requests is likely a good idea.
 - `link_regex`: A regex that is matched against the `href` part of links. If it
   matches, the given link is downloaded as a file. This is used to extract
   files from KIT-IPD pages. (Default: `^.*?[^/]+\.(pdf|zip|c|cpp|java)$`)
-- `auth`: Name of auth section to use for basic authentication. (Optional)
 
 ### The `ilias-web` crawler
 
@@ -164,15 +163,13 @@ out of the box for the corresponding universities:
 
 [ilias-dl]: https://github.com/V3lop5/ilias-downloader/blob/main/configs "ilias-downloader configs"
 
-| University      | `base_url`                               | `login_type` | `client_id`   |
-|-----------------|------------------------------------------|--------------|---------------|
-| FH Aachen       | https://www.ili.fh-aachen.de             | local        | elearning     |
-| HHU Düsseldorf  | https://ilias.hhu.de                     | local        | UniRZ         |
-| Uni Köln        | https://www.ilias.uni-koeln.de/ilias     | local        | uk            |
-| Uni Konstanz    | https://ilias.uni-konstanz.de            | local        | ILIASKONSTANZ |
-| Uni Stuttgart   | https://ilias3.uni-stuttgart.de          | local        | Uni_Stuttgart |
-| Uni Tübingen    | https://ovidius.uni-tuebingen.de/ilias3  | shibboleth   |               |
-| KIT ILIAS Pilot | https://pilot.ilias.studium.kit.edu      | shibboleth   | pilot         |
+| University    | `base_url`                               | `login_type` | `client_id`   |
+|---------------|------------------------------------------|--------------|---------------|
+| FH Aachen     | https://www.ili.fh-aachen.de             | local        | elearning     |
+| Uni Köln      | https://www.ilias.uni-koeln.de/ilias     | local        | uk            |
+| Uni Konstanz  | https://ilias.uni-konstanz.de            | local        | ILIASKONSTANZ |
+| Uni Stuttgart | https://ilias3.uni-stuttgart.de          | local        | Uni_Stuttgart |
+| Uni Tübingen  | https://ovidius.uni-tuebingen.de/ilias3  | shibboleth   |               |
 
 If your university isn't listed, try navigating to your instance's login page.
 Assuming no custom login service is used, the URL will look something like this:
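
For context on the removed kit-ipd `auth` option, here is a minimal sketch of a config that pairs a `kit-ipd` crawler with a basic-auth section, generated via Python's `configparser`. The section and key names (`type`, `link_regex`, `auth`, and the `simple` authenticator) come from the documentation and CLI hunks in this diff; the URL, output directory, and credentials are placeholders, not real values.

```
# Sketch: write a pferd.cfg with a kit-ipd crawler that uses basic auth.
# Section/key names follow CONFIG.md above; URL and credentials are placeholders.
from configparser import ConfigParser

config = ConfigParser()
config["crawl:ipd"] = {
    "type": "kit-ipd",
    "target": "https://example.kit.edu/some/lecture/page",  # placeholder URL
    "output_dir": "IPD Lecture",
    "link_regex": r"^.*?[^/]+\.(pdf|zip|c|cpp|java)$",       # default from the docs
    "auth": "auth:ipd",                                      # optional basic-auth section
}
config["auth:ipd"] = {
    "type": "simple",
    "username": "jane.doe",        # placeholder
    "password": "correct horse",   # placeholder
}

with open("pferd.cfg", "w", encoding="utf-8") as f:
    config.write(f)
```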

DEV.md
View File

@@ -9,25 +9,30 @@ particular [this][ppug-1] and [this][ppug-2] guide).
 ## Setting up a dev environment
 
-The use of [venv][venv] and [uv][uv] is recommended. To initially set up a
-development environment, run these commands in the same directory as this file:
+The use of [venv][venv] is recommended. To initially set up a development
+environment, run these commands in the same directory as this file:
 
 ```
-$ uv sync
+$ python -m venv .venv
 $ . .venv/bin/activate
+$ ./scripts/setup
 ```
 
-This installs all required dependencies and tools. It also installs PFERD as
-*editable*, which means that you can just run `pferd` as if it was installed
-normally. Since PFERD was installed with `--editable`, there is no need to
-re-run `uv sync` when the source code is changed.
+The setup script installs a few required dependencies and tools. It also
+installs PFERD via `pip install --editable .`, which means that you can just run
+`pferd` as if it was installed normally. Since PFERD was installed with
+`--editable`, there is no need to re-run `pip install` when the source code is
+changed.
+
+If you get any errors because pip can't update itself, try running
+`./scripts/setup --no-pip` instead of `./scripts/setup`.
 
 For more details, see [this part of the Python Tutorial][venv-tut] and
 [this section on "development mode"][ppug-dev].
 
 [venv]: <https://docs.python.org/3/library/venv.html> "venv - Creation of virtual environments"
 [venv-tut]: <https://docs.python.org/3/tutorial/venv.html> "12. Virtual Environments and Packages"
-[uv]: <https://docs.astral.sh/uv/> "uv - An extremely fast Python package and project manager"
+[ppug-dev]: <https://packaging.python.org/guides/distributing-packages-using-setuptools/#working-in-development-mode> "Working in “development mode”"
 
 ## Checking and formatting the code

View File

@@ -133,8 +133,7 @@ def main() -> None:
         # https://bugs.python.org/issue39232
         # https://github.com/encode/httpx/issues/914#issuecomment-780023632
         # TODO Fix this properly
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
+        loop = asyncio.get_event_loop()
         loop.run_until_complete(pferd.run(args.debug_transforms))
         loop.run_until_complete(asyncio.sleep(1))
         loop.close()
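
This hunk is the event-loop setup the changelog entry "Event loop errors on Windows with Python 3.14" refers to: the left-hand side creates and installs a fresh loop instead of calling `asyncio.get_event_loop()`, which no longer implicitly creates a loop on recent Python versions. A self-contained sketch of that pattern; the coroutine is a stand-in, not PFERD's actual `pferd.run`:

```
import asyncio

async def run_crawlers() -> None:
    # Stand-in for the real work (e.g. pferd.run(...)).
    await asyncio.sleep(0)

# get_event_loop() warns or fails outside a running loop on newer Python
# versions, so create and install a loop explicitly.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
    loop.run_until_complete(run_crawlers())
    # Give connections a moment to close cleanly before tearing the loop
    # down (see the linked aiohttp/httpx issues in the comments above).
    loop.run_until_complete(asyncio.sleep(1))
finally:
    loop.close()
```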

View File

@@ -1,5 +1,5 @@
-from collections.abc import Callable
 from configparser import SectionProxy
+from typing import Callable, Dict
 
 from ..config import Config
 from .authenticator import Authenticator, AuthError, AuthLoadError, AuthSection  # noqa: F401
@@ -9,19 +9,21 @@ from .pass_ import PassAuthenticator, PassAuthSection
 from .simple import SimpleAuthenticator, SimpleAuthSection
 from .tfa import TfaAuthenticator
 
-AuthConstructor = Callable[
-    [
-        str,  # Name (without the "auth:" prefix)
-        SectionProxy,  # Authenticator's section of global config
-        Config,  # Global config
-    ],
-    Authenticator,
-]
+AuthConstructor = Callable[[
+    str,                # Name (without the "auth:" prefix)
+    SectionProxy,       # Authenticator's section of global config
+    Config,             # Global config
+], Authenticator]
 
-AUTHENTICATORS: dict[str, AuthConstructor] = {
-    "credential-file": lambda n, s, c: CredentialFileAuthenticator(n, CredentialFileAuthSection(s), c),
-    "keyring": lambda n, s, c: KeyringAuthenticator(n, KeyringAuthSection(s)),
-    "pass": lambda n, s, c: PassAuthenticator(n, PassAuthSection(s)),
-    "simple": lambda n, s, c: SimpleAuthenticator(n, SimpleAuthSection(s)),
-    "tfa": lambda n, s, c: TfaAuthenticator(n),
+AUTHENTICATORS: Dict[str, AuthConstructor] = {
+    "credential-file": lambda n, s, c:
+        CredentialFileAuthenticator(n, CredentialFileAuthSection(s), c),
+    "keyring": lambda n, s, c:
+        KeyringAuthenticator(n, KeyringAuthSection(s)),
+    "pass": lambda n, s, c:
+        PassAuthenticator(n, PassAuthSection(s)),
+    "simple": lambda n, s, c:
+        SimpleAuthenticator(n, SimpleAuthSection(s)),
+    "tfa": lambda n, s, c:
+        TfaAuthenticator(n),
 }
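
Both sides of this hunk express the same idea: a registry mapping an authenticator `type` string to a constructor that receives the section name (without the `auth:` prefix), the section proxy, and the global config. A rough sketch of how such a registry is typically consumed; the classes and loading logic here are simplified stand-ins, not PFERD's actual code:

```
# Sketch of registry-based dispatch for auth sections; names are illustrative.
from typing import Callable, Dict

class Authenticator:
    def __init__(self, name: str) -> None:
        self.name = name

class SimpleAuthenticator(Authenticator): ...
class TfaAuthenticator(Authenticator): ...

Constructor = Callable[[str, Dict[str, str]], Authenticator]

REGISTRY: Dict[str, Constructor] = {
    "simple": lambda name, section: SimpleAuthenticator(name),
    "tfa": lambda name, section: TfaAuthenticator(name),
}

def load_authenticator(section_name: str, section: Dict[str, str]) -> Authenticator:
    kind = section["type"]
    if kind not in REGISTRY:
        raise ValueError(f"unknown authenticator type {kind!r}")
    # Strip the "auth:" prefix, as the comment in the Callable signature notes.
    return REGISTRY[kind](section_name.removeprefix("auth:"), section)

auth = load_authenticator("auth:demo", {"type": "simple"})
print(type(auth).__name__, auth.name)  # SimpleAuthenticator demo
```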

View File

@@ -1,4 +1,5 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Tuple
from ..config import Section from ..config import Section
@@ -34,7 +35,7 @@ class Authenticator(ABC):
self.name = name self.name = name
@abstractmethod @abstractmethod
async def credentials(self) -> tuple[str, str]: async def credentials(self) -> Tuple[str, str]:
pass pass
async def username(self) -> str: async def username(self) -> str:

View File

@@ -1,4 +1,5 @@
from pathlib import Path from pathlib import Path
from typing import Tuple
from ..config import Config from ..config import Config
from ..utils import fmt_real_path from ..utils import fmt_real_path
@@ -22,9 +23,7 @@ class CredentialFileAuthenticator(Authenticator):
with open(path, encoding="utf-8") as f: with open(path, encoding="utf-8") as f:
lines = list(f) lines = list(f)
except UnicodeDecodeError: except UnicodeDecodeError:
raise AuthLoadError( raise AuthLoadError(f"Credential file at {fmt_real_path(path)} is not encoded using UTF-8")
f"Credential file at {fmt_real_path(path)} is not encoded using UTF-8"
) from None
except OSError as e: except OSError as e:
raise AuthLoadError(f"No credential file at {fmt_real_path(path)}") from e raise AuthLoadError(f"No credential file at {fmt_real_path(path)}") from e
@@ -43,5 +42,5 @@ class CredentialFileAuthenticator(Authenticator):
self._username = uline[9:] self._username = uline[9:]
self._password = pline[9:] self._password = pline[9:]
async def credentials(self) -> tuple[str, str]: async def credentials(self) -> Tuple[str, str]:
return self._username, self._password return self._username, self._password
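
The `uline[9:]` / `pline[9:]` slices above imply a two-line credential file of the form `username=...` / `password=...` (both prefixes are nine characters long). A small sketch of reading such a file with the same UTF-8 handling the hunk shows; any validation PFERD performs beyond this is not visible here:

```
from pathlib import Path

def read_credential_file(path: Path) -> tuple[str, str]:
    # Assumed format (inferred from the [9:] slices above):
    #   username=someuser
    #   password=somepassword
    try:
        with open(path, encoding="utf-8") as f:
            lines = [line.rstrip("\n") for line in f]
    except UnicodeDecodeError:
        raise ValueError(f"Credential file at {path} is not encoded using UTF-8") from None
    except OSError as e:
        raise ValueError(f"No credential file at {path}") from e

    uline, pline = lines[0], lines[1]
    if not uline.startswith("username=") or not pline.startswith("password="):
        raise ValueError("Expected 'username=...' and 'password=...' lines")
    return uline[len("username="):], pline[len("password="):]
```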

View File

@@ -1,4 +1,4 @@
from typing import Optional from typing import Optional, Tuple
import keyring import keyring
@@ -17,6 +17,7 @@ class KeyringAuthSection(AuthSection):
class KeyringAuthenticator(Authenticator): class KeyringAuthenticator(Authenticator):
def __init__(self, name: str, section: KeyringAuthSection) -> None: def __init__(self, name: str, section: KeyringAuthSection) -> None:
super().__init__(name) super().__init__(name)
@@ -27,7 +28,7 @@ class KeyringAuthenticator(Authenticator):
self._password_invalidated = False self._password_invalidated = False
self._username_fixed = section.username() is not None self._username_fixed = section.username() is not None
async def credentials(self) -> tuple[str, str]: async def credentials(self) -> Tuple[str, str]:
# Request the username # Request the username
if self._username is None: if self._username is None:
async with log.exclusive_output(): async with log.exclusive_output():

View File

@@ -1,5 +1,6 @@
import re import re
import subprocess import subprocess
from typing import List, Tuple
from ..logging import log from ..logging import log
from .authenticator import Authenticator, AuthError, AuthSection from .authenticator import Authenticator, AuthError, AuthSection
@@ -11,11 +12,11 @@ class PassAuthSection(AuthSection):
self.missing_value("passname") self.missing_value("passname")
return value return value
def username_prefixes(self) -> list[str]: def username_prefixes(self) -> List[str]:
value = self.s.get("username_prefixes", "login,username,user") value = self.s.get("username_prefixes", "login,username,user")
return [prefix.lower() for prefix in value.split(",")] return [prefix.lower() for prefix in value.split(",")]
def password_prefixes(self) -> list[str]: def password_prefixes(self) -> List[str]:
value = self.s.get("password_prefixes", "password,pass,secret") value = self.s.get("password_prefixes", "password,pass,secret")
return [prefix.lower() for prefix in value.split(",")] return [prefix.lower() for prefix in value.split(",")]
@@ -30,14 +31,14 @@ class PassAuthenticator(Authenticator):
self._username_prefixes = section.username_prefixes() self._username_prefixes = section.username_prefixes()
self._password_prefixes = section.password_prefixes() self._password_prefixes = section.password_prefixes()
async def credentials(self) -> tuple[str, str]: async def credentials(self) -> Tuple[str, str]:
log.explain_topic("Obtaining credentials from pass") log.explain_topic("Obtaining credentials from pass")
try: try:
log.explain(f"Calling 'pass show {self._passname}'") log.explain(f"Calling 'pass show {self._passname}'")
result = subprocess.check_output(["pass", "show", self._passname], text=True) result = subprocess.check_output(["pass", "show", self._passname], text=True)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
raise AuthError(f"Failed to get password info from {self._passname}: {e}") from e raise AuthError(f"Failed to get password info from {self._passname}: {e}")
prefixed = {} prefixed = {}
unprefixed = [] unprefixed = []
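
The hunk above shells out to `pass show <passname>` and then sorts the output into prefixed and unprefixed credential lines, driven by the configurable `username_prefixes` and `password_prefixes`. A rough sketch of that prefix matching, assuming labelled lines look like `login: value` and that an unlabelled first line is the password (the usual pass convention); the exact line format is not shown in this hunk:

```
import subprocess
from typing import Optional, Tuple

USERNAME_PREFIXES = ["login", "username", "user"]   # defaults from the hunk above
PASSWORD_PREFIXES = ["password", "pass", "secret"]

def credentials_from_pass(passname: str) -> Tuple[str, str]:
    try:
        output = subprocess.check_output(["pass", "show", passname], text=True)
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Failed to get password info from {passname}: {e}") from e

    username: Optional[str] = None
    password: Optional[str] = None
    for line in output.splitlines():
        if ":" in line:
            prefix, _, value = line.partition(":")
            prefix = prefix.strip().lower()
            if prefix in USERNAME_PREFIXES and username is None:
                username = value.strip()
            elif prefix in PASSWORD_PREFIXES and password is None:
                password = value.strip()
        elif password is None and line.strip():
            password = line.strip()  # first bare line: the password itself

    if username is None or password is None:
        raise RuntimeError(f"Could not find both username and password in {passname}")
    return username, password
```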

View File

@@ -1,4 +1,4 @@
from typing import Optional from typing import Optional, Tuple
from ..logging import log from ..logging import log
from ..utils import agetpass, ainput from ..utils import agetpass, ainput
@@ -23,7 +23,7 @@ class SimpleAuthenticator(Authenticator):
self._username_fixed = self.username is not None self._username_fixed = self.username is not None
self._password_fixed = self.password is not None self._password_fixed = self.password is not None
async def credentials(self) -> tuple[str, str]: async def credentials(self) -> Tuple[str, str]:
if self._username is not None and self._password is not None: if self._username is not None and self._password is not None:
return self._username, self._password return self._username, self._password

View File

@@ -1,3 +1,5 @@
from typing import Tuple
from ..logging import log from ..logging import log
from ..utils import ainput from ..utils import ainput
from .authenticator import Authenticator, AuthError from .authenticator import Authenticator, AuthError
@@ -15,7 +17,7 @@ class TfaAuthenticator(Authenticator):
code = await ainput("TFA code: ") code = await ainput("TFA code: ")
return code return code
async def credentials(self) -> tuple[str, str]: async def credentials(self) -> Tuple[str, str]:
raise AuthError("TFA authenticator does not support usernames") raise AuthError("TFA authenticator does not support usernames")
def invalidate_username(self) -> None: def invalidate_username(self) -> None:

View File

@@ -21,20 +21,23 @@ GROUP.add_argument(
"--base-url", "--base-url",
type=str, type=str,
metavar="BASE_URL", metavar="BASE_URL",
help="The base url of the ilias instance", help="The base url of the ilias instance"
) )
GROUP.add_argument( GROUP.add_argument(
"--client-id", "--client-id",
type=str, type=str,
metavar="CLIENT_ID", metavar="CLIENT_ID",
help="The client id of the ilias instance", help="The client id of the ilias instance"
) )
configure_common_group_args(GROUP) configure_common_group_args(GROUP)
def load(args: argparse.Namespace, parser: configparser.ConfigParser) -> None: def load(
args: argparse.Namespace,
parser: configparser.ConfigParser,
) -> None:
log.explain(f"Creating config for command '{COMMAND_NAME}'") log.explain(f"Creating config for command '{COMMAND_NAME}'")
parser["crawl:ilias"] = {} parser["crawl:ilias"] = {}
@@ -42,8 +45,8 @@ def load(args: argparse.Namespace, parser: configparser.ConfigParser) -> None:
load_crawler(args, section) load_crawler(args, section)
section["type"] = COMMAND_NAME section["type"] = COMMAND_NAME
if args.base_url is not None: if args.ilias_url is not None:
section["base_url"] = args.base_url section["base_url"] = args.ilias_url
if args.client_id is not None: if args.client_id is not None:
section["client_id"] = args.client_id section["client_id"] = args.client_id

View File

@@ -21,8 +21,8 @@ configure_common_group_args(GROUP)
def load( def load(
args: argparse.Namespace, args: argparse.Namespace,
parser: configparser.ConfigParser, parser: configparser.ConfigParser,
) -> None: ) -> None:
log.explain(f"Creating config for command '{COMMAND_NAME}'") log.explain(f"Creating config for command '{COMMAND_NAME}'")

View File

@@ -18,30 +18,25 @@ GROUP.add_argument(
"--link-regex", "--link-regex",
type=str, type=str,
metavar="REGEX", metavar="REGEX",
help="href-matching regex to identify downloadable files", help="href-matching regex to identify downloadable files"
)
GROUP.add_argument(
"--basic-auth",
action="store_true",
help="enable basic authentication",
) )
GROUP.add_argument( GROUP.add_argument(
"target", "target",
type=str, type=str,
metavar="TARGET", metavar="TARGET",
help="url to crawl", help="url to crawl"
) )
GROUP.add_argument( GROUP.add_argument(
"output", "output",
type=Path, type=Path,
metavar="OUTPUT", metavar="OUTPUT",
help="output directory", help="output directory"
) )
def load( def load(
args: argparse.Namespace, args: argparse.Namespace,
parser: configparser.ConfigParser, parser: configparser.ConfigParser,
) -> None: ) -> None:
log.explain("Creating config for command 'kit-ipd'") log.explain("Creating config for command 'kit-ipd'")
@@ -55,11 +50,5 @@ def load(
if args.link_regex: if args.link_regex:
section["link_regex"] = str(args.link_regex) section["link_regex"] = str(args.link_regex)
if args.basic_auth:
section["auth"] = "auth:kit-ipd"
parser["auth:kit-ipd"] = {}
auth_section = parser["auth:kit-ipd"]
auth_section["type"] = "simple"
SUBPARSER.set_defaults(command=load) SUBPARSER.set_defaults(command=load)

View File

@@ -18,37 +18,37 @@ GROUP.add_argument(
"target", "target",
type=Path, type=Path,
metavar="TARGET", metavar="TARGET",
help="directory to crawl", help="directory to crawl"
) )
GROUP.add_argument( GROUP.add_argument(
"output", "output",
type=Path, type=Path,
metavar="OUTPUT", metavar="OUTPUT",
help="output directory", help="output directory"
) )
GROUP.add_argument( GROUP.add_argument(
"--crawl-delay", "--crawl-delay",
type=float, type=float,
metavar="SECONDS", metavar="SECONDS",
help="artificial delay to simulate for crawl requests", help="artificial delay to simulate for crawl requests"
) )
GROUP.add_argument( GROUP.add_argument(
"--download-delay", "--download-delay",
type=float, type=float,
metavar="SECONDS", metavar="SECONDS",
help="artificial delay to simulate for download requests", help="artificial delay to simulate for download requests"
) )
GROUP.add_argument( GROUP.add_argument(
"--download-speed", "--download-speed",
type=int, type=int,
metavar="BYTES_PER_SECOND", metavar="BYTES_PER_SECOND",
help="download speed to simulate", help="download speed to simulate"
) )
def load( def load(
args: argparse.Namespace, args: argparse.Namespace,
parser: configparser.ConfigParser, parser: configparser.ConfigParser,
) -> None: ) -> None:
log.explain("Creating config for command 'local'") log.explain("Creating config for command 'local'")

View File

@@ -12,60 +12,58 @@ def configure_common_group_args(group: argparse._ArgumentGroup) -> None:
"target", "target",
type=str, type=str,
metavar="TARGET", metavar="TARGET",
help="course id, 'desktop', or ILIAS URL to crawl", help="course id, 'desktop', or ILIAS URL to crawl"
) )
group.add_argument( group.add_argument(
"output", "output",
type=Path, type=Path,
metavar="OUTPUT", metavar="OUTPUT",
help="output directory", help="output directory"
) )
group.add_argument( group.add_argument(
"--username", "--username", "-u",
"-u",
type=str, type=str,
metavar="USERNAME", metavar="USERNAME",
help="user name for authentication", help="user name for authentication"
) )
group.add_argument( group.add_argument(
"--keyring", "--keyring",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="use the system keyring to store and retrieve passwords", help="use the system keyring to store and retrieve passwords"
) )
group.add_argument( group.add_argument(
"--credential-file", "--credential-file",
type=Path, type=Path,
metavar="PATH", metavar="PATH",
help="read username and password from a credential file", help="read username and password from a credential file"
) )
group.add_argument( group.add_argument(
"--links", "--links",
type=show_value_error(Links.from_string), type=show_value_error(Links.from_string),
metavar="OPTION", metavar="OPTION",
help="how to represent external links", help="how to represent external links"
) )
group.add_argument( group.add_argument(
"--link-redirect-delay", "--link-redirect-delay",
type=int, type=int,
metavar="SECONDS", metavar="SECONDS",
help="time before 'fancy' links redirect to to their target (-1 to disable)", help="time before 'fancy' links redirect to to their target (-1 to disable)"
) )
group.add_argument( group.add_argument(
"--videos", "--videos",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="crawl and download videos", help="crawl and download videos"
) )
group.add_argument( group.add_argument(
"--forums", "--forums",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="crawl and download forum posts", help="crawl and download forum posts"
) )
group.add_argument( group.add_argument(
"--http-timeout", "--http-timeout", "-t",
"-t",
type=float, type=float,
metavar="SECONDS", metavar="SECONDS",
help="timeout for all HTTP requests", help="timeout for all HTTP requests"
) )

View File

@@ -1,9 +1,8 @@
import argparse import argparse
import configparser import configparser
from argparse import ArgumentTypeError from argparse import ArgumentTypeError
from collections.abc import Callable, Sequence
from pathlib import Path from pathlib import Path
from typing import Any, Optional from typing import Any, Callable, List, Optional, Sequence, Union
from ..output_dir import OnConflict, Redownload from ..output_dir import OnConflict, Redownload
from ..version import NAME, VERSION from ..version import NAME, VERSION
@@ -16,15 +15,15 @@ class ParserLoadError(Exception):
# TODO Replace with argparse version when updating to 3.9? # TODO Replace with argparse version when updating to 3.9?
class BooleanOptionalAction(argparse.Action): class BooleanOptionalAction(argparse.Action):
def __init__( def __init__(
self, self,
option_strings: list[str], option_strings: List[str],
dest: Any, dest: Any,
default: Any = None, default: Any = None,
type: Any = None, type: Any = None,
choices: Any = None, choices: Any = None,
required: Any = False, required: Any = False,
help: Any = None, help: Any = None,
metavar: Any = None, metavar: Any = None,
): ):
if len(option_strings) != 1: if len(option_strings) != 1:
raise ValueError("There must be exactly one option string") raise ValueError("There must be exactly one option string")
@@ -49,11 +48,11 @@ class BooleanOptionalAction(argparse.Action):
) )
def __call__( def __call__(
self, self,
parser: argparse.ArgumentParser, parser: argparse.ArgumentParser,
namespace: argparse.Namespace, namespace: argparse.Namespace,
values: str | Sequence[Any] | None, values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None, option_string: Optional[str] = None,
) -> None: ) -> None:
if option_string and option_string in self.option_strings: if option_string and option_string in self.option_strings:
value = not option_string.startswith("--no-") value = not option_string.startswith("--no-")
@@ -68,13 +67,11 @@ def show_value_error(inner: Callable[[str], Any]) -> Callable[[str], Any]:
Some validation functions (like the from_string in our enums) raise a ValueError. Some validation functions (like the from_string in our enums) raise a ValueError.
Argparse only pretty-prints ArgumentTypeErrors though, so we need to wrap our ValueErrors. Argparse only pretty-prints ArgumentTypeErrors though, so we need to wrap our ValueErrors.
""" """
def wrapper(input: str) -> Any: def wrapper(input: str) -> Any:
try: try:
return inner(input) return inner(input)
except ValueError as e: except ValueError as e:
raise ArgumentTypeError(e) from e raise ArgumentTypeError(e)
return wrapper return wrapper
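
`show_value_error` exists because argparse only renders `ArgumentTypeError` messages nicely; a plain `ValueError` from an enum parser would surface as a bare "invalid value" error. (The `BooleanOptionalAction` that the TODO above mentions has shipped in the standard library's argparse since Python 3.9.) A self-contained sketch of the wrapping idea; the enum here is made up, PFERD's real `Links`/`Redownload` enums define their own `from_string`:

```
import argparse
from enum import Enum
from typing import Any, Callable

class Redownload(Enum):
    NEVER = "never"
    ALWAYS = "always"

    @classmethod
    def from_string(cls, s: str) -> "Redownload":
        try:
            return cls(s)
        except ValueError:
            raise ValueError(f"must be one of {[m.value for m in cls]}") from None

def show_value_error(inner: Callable[[str], Any]) -> Callable[[str], Any]:
    # Argparse pretty-prints ArgumentTypeError, but not ValueError.
    def wrapper(value: str) -> Any:
        try:
            return inner(value)
        except ValueError as e:
            raise argparse.ArgumentTypeError(e) from e
    return wrapper

parser = argparse.ArgumentParser()
parser.add_argument("--redownload", type=show_value_error(Redownload.from_string))
print(parser.parse_args(["--redownload", "never"]).redownload)  # Redownload.NEVER
```
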
@@ -84,57 +81,52 @@ CRAWLER_PARSER_GROUP = CRAWLER_PARSER.add_argument_group(
description="arguments common to all crawlers", description="arguments common to all crawlers",
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--redownload", "--redownload", "-r",
"-r",
type=show_value_error(Redownload.from_string), type=show_value_error(Redownload.from_string),
metavar="OPTION", metavar="OPTION",
help="when to download a file that's already present locally", help="when to download a file that's already present locally"
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--on-conflict", "--on-conflict",
type=show_value_error(OnConflict.from_string), type=show_value_error(OnConflict.from_string),
metavar="OPTION", metavar="OPTION",
help="what to do when local and remote files or directories differ", help="what to do when local and remote files or directories differ"
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--transform", "--transform", "-T",
"-T",
action="append", action="append",
type=str, type=str,
metavar="RULE", metavar="RULE",
help="add a single transformation rule. Can be specified multiple times", help="add a single transformation rule. Can be specified multiple times"
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--tasks", "--tasks", "-n",
"-n",
type=int, type=int,
metavar="N", metavar="N",
help="maximum number of concurrent tasks (crawling, downloading)", help="maximum number of concurrent tasks (crawling, downloading)"
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--downloads", "--downloads", "-N",
"-N",
type=int, type=int,
metavar="N", metavar="N",
help="maximum number of tasks that may download data at the same time", help="maximum number of tasks that may download data at the same time"
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--task-delay", "--task-delay", "-d",
"-d",
type=float, type=float,
metavar="SECONDS", metavar="SECONDS",
help="time the crawler should wait between subsequent tasks", help="time the crawler should wait between subsequent tasks"
) )
CRAWLER_PARSER_GROUP.add_argument( CRAWLER_PARSER_GROUP.add_argument(
"--windows-paths", "--windows-paths",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="whether to repair invalid paths on windows", help="whether to repair invalid paths on windows"
) )
def load_crawler( def load_crawler(
args: argparse.Namespace, args: argparse.Namespace,
section: configparser.SectionProxy, section: configparser.SectionProxy,
) -> None: ) -> None:
if args.redownload is not None: if args.redownload is not None:
section["redownload"] = args.redownload.value section["redownload"] = args.redownload.value
@@ -160,79 +152,79 @@ PARSER.add_argument(
version=f"{NAME} {VERSION} (https://github.com/Garmelon/PFERD)", version=f"{NAME} {VERSION} (https://github.com/Garmelon/PFERD)",
) )
PARSER.add_argument( PARSER.add_argument(
"--config", "--config", "-c",
"-c",
type=Path, type=Path,
metavar="PATH", metavar="PATH",
help="custom config file", help="custom config file"
) )
PARSER.add_argument( PARSER.add_argument(
"--dump-config", "--dump-config",
action="store_true", action="store_true",
help="dump current configuration to the default config path and exit", help="dump current configuration to the default config path and exit"
) )
PARSER.add_argument( PARSER.add_argument(
"--dump-config-to", "--dump-config-to",
metavar="PATH", metavar="PATH",
help="dump current configuration to a file and exit. Use '-' as path to print to stdout instead", help="dump current configuration to a file and exit."
" Use '-' as path to print to stdout instead"
) )
PARSER.add_argument( PARSER.add_argument(
"--debug-transforms", "--debug-transforms",
action="store_true", action="store_true",
help="apply transform rules to files of previous run", help="apply transform rules to files of previous run"
) )
PARSER.add_argument( PARSER.add_argument(
"--crawler", "--crawler", "-C",
"-C",
action="append", action="append",
type=str, type=str,
metavar="NAME", metavar="NAME",
help="only execute a single crawler. Can be specified multiple times to execute multiple crawlers", help="only execute a single crawler."
" Can be specified multiple times to execute multiple crawlers"
) )
PARSER.add_argument( PARSER.add_argument(
"--skip", "--skip", "-S",
"-S",
action="append", action="append",
type=str, type=str,
metavar="NAME", metavar="NAME",
help="don't execute this particular crawler. Can be specified multiple times to skip multiple crawlers", help="don't execute this particular crawler."
" Can be specified multiple times to skip multiple crawlers"
) )
PARSER.add_argument( PARSER.add_argument(
"--working-dir", "--working-dir",
type=Path, type=Path,
metavar="PATH", metavar="PATH",
help="custom working directory", help="custom working directory"
) )
PARSER.add_argument( PARSER.add_argument(
"--explain", "--explain",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="log and explain in detail what PFERD is doing", help="log and explain in detail what PFERD is doing"
) )
PARSER.add_argument( PARSER.add_argument(
"--status", "--status",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="print status updates while PFERD is crawling", help="print status updates while PFERD is crawling"
) )
PARSER.add_argument( PARSER.add_argument(
"--report", "--report",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="print a report of all local changes before exiting", help="print a report of all local changes before exiting"
) )
PARSER.add_argument( PARSER.add_argument(
"--share-cookies", "--share-cookies",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="whether crawlers should share cookies where applicable", help="whether crawlers should share cookies where applicable"
) )
PARSER.add_argument( PARSER.add_argument(
"--show-not-deleted", "--show-not-deleted",
action=BooleanOptionalAction, action=BooleanOptionalAction,
help="print messages in status and report when PFERD did not delete a local only file", help="print messages in status and report when PFERD did not delete a local only file"
) )
def load_default_section( def load_default_section(
args: argparse.Namespace, args: argparse.Namespace,
parser: configparser.ConfigParser, parser: configparser.ConfigParser,
) -> None: ) -> None:
section = parser[parser.default_section] section = parser[parser.default_section]

View File

@@ -3,7 +3,7 @@ import os
import sys import sys
from configparser import ConfigParser, SectionProxy from configparser import ConfigParser, SectionProxy
from pathlib import Path from pathlib import Path
from typing import Any, NoReturn, Optional from typing import Any, List, NoReturn, Optional, Tuple
from rich.markup import escape from rich.markup import escape
@@ -53,10 +53,10 @@ class Section:
raise ConfigOptionError(self.s.name, key, desc) raise ConfigOptionError(self.s.name, key, desc)
def invalid_value( def invalid_value(
self, self,
key: str, key: str,
value: Any, value: Any,
reason: Optional[str], reason: Optional[str],
) -> NoReturn: ) -> NoReturn:
if reason is None: if reason is None:
self.error(key, f"Invalid value {value!r}") self.error(key, f"Invalid value {value!r}")
@@ -126,13 +126,13 @@ class Config:
with open(path, encoding="utf-8") as f: with open(path, encoding="utf-8") as f:
parser.read_file(f, source=str(path)) parser.read_file(f, source=str(path))
except FileNotFoundError: except FileNotFoundError:
raise ConfigLoadError(path, "File does not exist") from None raise ConfigLoadError(path, "File does not exist")
except IsADirectoryError: except IsADirectoryError:
raise ConfigLoadError(path, "That's a directory, not a file") from None raise ConfigLoadError(path, "That's a directory, not a file")
except PermissionError: except PermissionError:
raise ConfigLoadError(path, "Insufficient permissions") from None raise ConfigLoadError(path, "Insufficient permissions")
except UnicodeDecodeError: except UnicodeDecodeError:
raise ConfigLoadError(path, "File is not encoded using UTF-8") from None raise ConfigLoadError(path, "File is not encoded using UTF-8")
def dump(self, path: Optional[Path] = None) -> None: def dump(self, path: Optional[Path] = None) -> None:
""" """
@@ -150,8 +150,8 @@ class Config:
try: try:
path.parent.mkdir(parents=True, exist_ok=True) path.parent.mkdir(parents=True, exist_ok=True)
except PermissionError as e: except PermissionError:
raise ConfigDumpError(path, "Could not create parent directory") from e raise ConfigDumpError(path, "Could not create parent directory")
try: try:
# Ensuring we don't accidentally overwrite any existing files by # Ensuring we don't accidentally overwrite any existing files by
@@ -167,16 +167,16 @@ class Config:
with open(path, "w", encoding="utf-8") as f: with open(path, "w", encoding="utf-8") as f:
self._parser.write(f) self._parser.write(f)
else: else:
raise ConfigDumpError(path, "File already exists") from None raise ConfigDumpError(path, "File already exists")
except IsADirectoryError: except IsADirectoryError:
raise ConfigDumpError(path, "That's a directory, not a file") from None raise ConfigDumpError(path, "That's a directory, not a file")
except PermissionError as e: except PermissionError:
raise ConfigDumpError(path, "Insufficient permissions") from e raise ConfigDumpError(path, "Insufficient permissions")
def dump_to_stdout(self) -> None: def dump_to_stdout(self) -> None:
self._parser.write(sys.stdout) self._parser.write(sys.stdout)
def crawl_sections(self) -> list[tuple[str, SectionProxy]]: def crawl_sections(self) -> List[Tuple[str, SectionProxy]]:
result = [] result = []
for name, proxy in self._parser.items(): for name, proxy in self._parser.items():
if name.startswith("crawl:"): if name.startswith("crawl:"):
@@ -184,7 +184,7 @@ class Config:
return result return result
def auth_sections(self) -> list[tuple[str, SectionProxy]]: def auth_sections(self) -> List[Tuple[str, SectionProxy]]:
result = [] result = []
for name, proxy in self._parser.items(): for name, proxy in self._parser.items():
if name.startswith("auth:"): if name.startswith("auth:"):
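
Several of the `Config` hunks in this file differ only in exception chaining: the left-hand side re-raises with `raise ... from e` to keep the original cause, or `from None` to suppress an uninteresting one, while the right-hand side re-raises bare. A tiny sketch of the visible difference:

```
# Sketch: exception chaining vs. suppressed context; the error class is illustrative.
class ConfigDumpError(Exception):
    pass

def mkdir_chained(path: str) -> None:
    try:
        raise PermissionError(13, "Permission denied", path)  # simulated failure
    except PermissionError as e:
        # "from e" keeps the PermissionError as __cause__ in the traceback.
        raise ConfigDumpError(f"Could not create parent directory {path}") from e

def mkdir_suppressed(path: str) -> None:
    try:
        raise PermissionError(13, "Permission denied", path)  # simulated failure
    except PermissionError:
        # "from None" hides the original exception from the traceback entirely.
        raise ConfigDumpError(f"Could not create parent directory {path}") from None

try:
    mkdir_chained("/root/forbidden")
except ConfigDumpError as e:
    print(type(e.__cause__).__name__)  # PermissionError
```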

View File

@@ -1,5 +1,5 @@
-from collections.abc import Callable
 from configparser import SectionProxy
+from typing import Callable, Dict
 
 from ..auth import Authenticator
 from ..config import Config
@@ -8,19 +8,20 @@ from .ilias import IliasWebCrawler, IliasWebCrawlerSection, KitIliasWebCrawler,
 from .kit_ipd_crawler import KitIpdCrawler, KitIpdCrawlerSection
 from .local_crawler import LocalCrawler, LocalCrawlerSection
 
-CrawlerConstructor = Callable[
-    [
-        str,  # Name (without the "crawl:" prefix)
-        SectionProxy,  # Crawler's section of global config
-        Config,  # Global config
-        dict[str, Authenticator],  # Loaded authenticators by name
-    ],
-    Crawler,
-]
+CrawlerConstructor = Callable[[
+    str,                       # Name (without the "crawl:" prefix)
+    SectionProxy,              # Crawler's section of global config
+    Config,                    # Global config
+    Dict[str, Authenticator],  # Loaded authenticators by name
+], Crawler]
 
-CRAWLERS: dict[str, CrawlerConstructor] = {
-    "local": lambda n, s, c, a: LocalCrawler(n, LocalCrawlerSection(s), c),
-    "ilias-web": lambda n, s, c, a: IliasWebCrawler(n, IliasWebCrawlerSection(s), c, a),
-    "kit-ilias-web": lambda n, s, c, a: KitIliasWebCrawler(n, KitIliasWebCrawlerSection(s), c, a),
-    "kit-ipd": lambda n, s, c, a: KitIpdCrawler(n, KitIpdCrawlerSection(s), c, a),
+CRAWLERS: Dict[str, CrawlerConstructor] = {
+    "local": lambda n, s, c, a:
+        LocalCrawler(n, LocalCrawlerSection(s), c),
+    "ilias-web": lambda n, s, c, a:
+        IliasWebCrawler(n, IliasWebCrawlerSection(s), c, a),
+    "kit-ilias-web": lambda n, s, c, a:
+        KitIliasWebCrawler(n, KitIliasWebCrawlerSection(s), c, a),
+    "kit-ipd": lambda n, s, c, a:
+        KitIpdCrawler(n, KitIpdCrawlerSection(s), c),
 }

View File

@@ -1,10 +1,10 @@
import asyncio import asyncio
import os import os
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections.abc import Awaitable, Callable, Coroutine, Sequence from collections.abc import Awaitable, Coroutine
from datetime import datetime from datetime import datetime
from pathlib import Path, PurePath from pathlib import Path, PurePath
from typing import Any, Optional, TypeVar from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, TypeVar
from ..auth import Authenticator from ..auth import Authenticator
from ..config import Config, Section from ..config import Config, Section
@@ -116,7 +116,7 @@ class CrawlToken(ReusableAsyncContextManager[ProgressBar]):
return bar return bar
class DownloadToken(ReusableAsyncContextManager[tuple[ProgressBar, FileSink]]): class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
def __init__(self, limiter: Limiter, fs_token: FileSinkToken, path: PurePath): def __init__(self, limiter: Limiter, fs_token: FileSinkToken, path: PurePath):
super().__init__() super().__init__()
@@ -128,13 +128,12 @@ class DownloadToken(ReusableAsyncContextManager[tuple[ProgressBar, FileSink]]):
def path(self) -> PurePath: def path(self) -> PurePath:
return self._path return self._path
async def _on_aenter(self) -> tuple[ProgressBar, FileSink]: async def _on_aenter(self) -> Tuple[ProgressBar, FileSink]:
await self._stack.enter_async_context(self._limiter.limit_download()) await self._stack.enter_async_context(self._limiter.limit_download())
sink = await self._stack.enter_async_context(self._fs_token) sink = await self._stack.enter_async_context(self._fs_token)
# The "Downloaded ..." message is printed in the output dir, not here # The "Downloaded ..." message is printed in the output dir, not here
bar = self._stack.enter_context( bar = self._stack.enter_context(log.download_bar("[bold bright_cyan]", "Downloading",
log.download_bar("[bold bright_cyan]", "Downloading", fmt_path(self._path)) fmt_path(self._path)))
)
return bar, sink return bar, sink
@@ -150,7 +149,9 @@ class CrawlerSection(Section):
return self.s.getboolean("skip", fallback=False) return self.s.getboolean("skip", fallback=False)
def output_dir(self, name: str) -> Path: def output_dir(self, name: str) -> Path:
name = name.removeprefix("crawl:") # TODO Use removeprefix() after switching to 3.9
if name.startswith("crawl:"):
name = name[len("crawl:"):]
return Path(self.s.get("output_dir", name)).expanduser() return Path(self.s.get("output_dir", name)).expanduser()
def redownload(self) -> Redownload: def redownload(self) -> Redownload:
@@ -205,7 +206,7 @@ class CrawlerSection(Section):
on_windows = os.name == "nt" on_windows = os.name == "nt"
return self.s.getboolean("windows_paths", fallback=on_windows) return self.s.getboolean("windows_paths", fallback=on_windows)
def auth(self, authenticators: dict[str, Authenticator]) -> Authenticator: def auth(self, authenticators: Dict[str, Authenticator]) -> Authenticator:
value = self.s.get("auth") value = self.s.get("auth")
if value is None: if value is None:
self.missing_value("auth") self.missing_value("auth")
@@ -217,10 +218,10 @@ class CrawlerSection(Section):
class Crawler(ABC): class Crawler(ABC):
def __init__( def __init__(
self, self,
name: str, name: str,
section: CrawlerSection, section: CrawlerSection,
config: Config, config: Config,
) -> None: ) -> None:
""" """
Initialize a crawler from its name and its section in the config file. Initialize a crawler from its name and its section in the config file.
@@ -262,7 +263,7 @@ class Crawler(ABC):
return self._output_dir return self._output_dir
@staticmethod @staticmethod
async def gather(awaitables: Sequence[Awaitable[Any]]) -> list[Any]: async def gather(awaitables: Sequence[Awaitable[Any]]) -> List[Any]:
""" """
Similar to asyncio.gather. However, in the case of an exception, all Similar to asyncio.gather. However, in the case of an exception, all
still running tasks are cancelled and the exception is rethrown. still running tasks are cancelled and the exception is rethrown.
@@ -293,39 +294,14 @@ class Crawler(ABC):
log.explain("Answer: Yes") log.explain("Answer: Yes")
return CrawlToken(self._limiter, path) return CrawlToken(self._limiter, path)
def should_try_download(
self,
path: PurePath,
*,
etag_differs: Optional[bool] = None,
mtime: Optional[datetime] = None,
redownload: Optional[Redownload] = None,
on_conflict: Optional[OnConflict] = None,
) -> bool:
log.explain_topic(f"Decision: Should Download {fmt_path(path)}")
if self._transformer.transform(path) is None:
log.explain("Answer: No (ignored)")
return False
should_download = self._output_dir.should_try_download(
path, etag_differs=etag_differs, mtime=mtime, redownload=redownload, on_conflict=on_conflict
)
if should_download:
log.explain("Answer: Yes")
return True
else:
log.explain("Answer: No")
return False
async def download( async def download(
self, self,
path: PurePath, path: PurePath,
*, *,
etag_differs: Optional[bool] = None, etag_differs: Optional[bool] = None,
mtime: Optional[datetime] = None, mtime: Optional[datetime] = None,
redownload: Optional[Redownload] = None, redownload: Optional[Redownload] = None,
on_conflict: Optional[OnConflict] = None, on_conflict: Optional[OnConflict] = None,
) -> Optional[DownloadToken]: ) -> Optional[DownloadToken]:
log.explain_topic(f"Decision: Download {fmt_path(path)}") log.explain_topic(f"Decision: Download {fmt_path(path)}")
path = self._deduplicator.mark(path) path = self._deduplicator.mark(path)
@@ -343,7 +319,7 @@ class Crawler(ABC):
etag_differs=etag_differs, etag_differs=etag_differs,
mtime=mtime, mtime=mtime,
redownload=redownload, redownload=redownload,
on_conflict=on_conflict, on_conflict=on_conflict
) )
if fs_token is None: if fs_token is None:
log.explain("Answer: No") log.explain("Answer: No")
@@ -394,7 +370,7 @@ class Crawler(ABC):
log.warn("Couldn't find or load old report") log.warn("Couldn't find or load old report")
return return
seen: set[PurePath] = set() seen: Set[PurePath] = set()
for known in sorted(self.prev_report.found_paths): for known in sorted(self.prev_report.found_paths):
looking_at = list(reversed(known.parents)) + [known] looking_at = list(reversed(known.parents)) + [known]
for path in looking_at: for path in looking_at:
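
The docstring of `Crawler.gather` above says it works like `asyncio.gather`, but cancels all still-running tasks once one of them raises; plain `asyncio.gather` leaves the remaining tasks running by default. A small sketch of that behaviour, independent of PFERD's classes:

```
import asyncio
from typing import Any, Awaitable, List, Sequence

async def gather_cancel_on_error(awaitables: Sequence[Awaitable[Any]]) -> List[Any]:
    # Like asyncio.gather, but cancel everything still running as soon as
    # one awaitable raises, then re-raise that exception.
    tasks = [asyncio.ensure_future(aw) for aw in awaitables]
    try:
        return await asyncio.gather(*tasks)
    except Exception:
        for task in tasks:
            task.cancel()
        raise
```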

View File

@@ -3,7 +3,7 @@ import http.cookies
import ssl import ssl
from datetime import datetime from datetime import datetime
from pathlib import Path, PurePath from pathlib import Path, PurePath
from typing import Any, Optional from typing import Any, Dict, List, Optional, Tuple
import aiohttp import aiohttp
import certifi import certifi
@@ -13,7 +13,7 @@ from bs4 import Tag
from ..auth import Authenticator from ..auth import Authenticator
from ..config import Config from ..config import Config
from ..logging import log from ..logging import log
from ..utils import fmt_real_path, sanitize_path_name from ..utils import fmt_real_path
from ..version import NAME, VERSION from ..version import NAME, VERSION
from .crawler import Crawler, CrawlerSection from .crawler import Crawler, CrawlerSection
@@ -22,18 +22,18 @@ ETAGS_CUSTOM_REPORT_VALUE_KEY = "etags"
class HttpCrawlerSection(CrawlerSection): class HttpCrawlerSection(CrawlerSection):
def http_timeout(self) -> float: def http_timeout(self) -> float:
return self.s.getfloat("http_timeout", fallback=30) return self.s.getfloat("http_timeout", fallback=20)
class HttpCrawler(Crawler): class HttpCrawler(Crawler):
COOKIE_FILE = PurePath(".cookies") COOKIE_FILE = PurePath(".cookies")
def __init__( def __init__(
self, self,
name: str, name: str,
section: HttpCrawlerSection, section: HttpCrawlerSection,
config: Config, config: Config,
shared_auth: Optional[Authenticator] = None, shared_auth: Optional[Authenticator] = None,
) -> None: ) -> None:
super().__init__(name, section, config) super().__init__(name, section, config)
@@ -43,7 +43,7 @@ class HttpCrawler(Crawler):
self._http_timeout = section.http_timeout() self._http_timeout = section.http_timeout()
self._cookie_jar_path = self._output_dir.resolve(self.COOKIE_FILE) self._cookie_jar_path = self._output_dir.resolve(self.COOKIE_FILE)
self._shared_cookie_jar_paths: Optional[list[Path]] = None self._shared_cookie_jar_paths: Optional[List[Path]] = None
self._shared_auth = shared_auth self._shared_auth = shared_auth
self._output_dir.register_reserved(self.COOKIE_FILE) self._output_dir.register_reserved(self.COOKIE_FILE)
@@ -98,7 +98,7 @@ class HttpCrawler(Crawler):
""" """
raise RuntimeError("_authenticate() was called but crawler doesn't provide an implementation") raise RuntimeError("_authenticate() was called but crawler doesn't provide an implementation")
def share_cookies(self, shared: dict[Authenticator, list[Path]]) -> None: def share_cookies(self, shared: Dict[Authenticator, List[Path]]) -> None:
if not self._shared_auth: if not self._shared_auth:
return return
@@ -192,7 +192,7 @@ class HttpCrawler(Crawler):
if level_heading is None: if level_heading is None:
return find_associated_headings(tag, level - 1) return find_associated_headings(tag, level - 1)
folder_name = sanitize_path_name(level_heading.get_text().strip()) folder_name = level_heading.getText().strip()
return find_associated_headings(level_heading, level - 1) / folder_name return find_associated_headings(level_heading, level - 1) / folder_name
# start at level <h3> because paragraph-level headings are usually too granular for folder names # start at level <h3> because paragraph-level headings are usually too granular for folder names
@@ -219,7 +219,7 @@ class HttpCrawler(Crawler):
etags[str(path)] = etag etags[str(path)] = etag
self._output_dir.report.add_custom_value(ETAGS_CUSTOM_REPORT_VALUE_KEY, etags) self._output_dir.report.add_custom_value(ETAGS_CUSTOM_REPORT_VALUE_KEY, etags)
async def _request_resource_version(self, resource_url: str) -> tuple[Optional[str], Optional[datetime]]: async def _request_resource_version(self, resource_url: str) -> Tuple[Optional[str], Optional[datetime]]:
""" """
Requests the ETag and Last-Modified headers of a resource via a HEAD request. Requests the ETag and Last-Modified headers of a resource via a HEAD request.
If no entity tag / modification date can be obtained, the according value will be None. If no entity tag / modification date can be obtained, the according value will be None.
@@ -231,7 +231,6 @@ class HttpCrawler(Crawler):
etag_header = resp.headers.get("ETag") etag_header = resp.headers.get("ETag")
last_modified_header = resp.headers.get("Last-Modified") last_modified_header = resp.headers.get("Last-Modified")
last_modified = None
if last_modified_header: if last_modified_header:
try: try:
@@ -252,23 +251,23 @@ class HttpCrawler(Crawler):
self._load_cookies() self._load_cookies()
async with aiohttp.ClientSession( async with aiohttp.ClientSession(
headers={"User-Agent": f"{NAME}/{VERSION}"}, headers={"User-Agent": f"{NAME}/{VERSION}"},
cookie_jar=self._cookie_jar, cookie_jar=self._cookie_jar,
connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())), connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())),
timeout=ClientTimeout( timeout=ClientTimeout(
# 30 minutes. No download in the history of downloads was longer than 30 minutes. # 30 minutes. No download in the history of downloads was longer than 30 minutes.
# This is enough to transfer a 600 MB file over a 3 Mib/s connection. # This is enough to transfer a 600 MB file over a 3 Mib/s connection.
# Allowing an arbitrary value could be annoying for overnight batch jobs # Allowing an arbitrary value could be annoying for overnight batch jobs
total=15 * 60, total=15 * 60,
connect=self._http_timeout, connect=self._http_timeout,
sock_connect=self._http_timeout, sock_connect=self._http_timeout,
sock_read=self._http_timeout, sock_read=self._http_timeout,
), ),
# See https://github.com/aio-libs/aiohttp/issues/6626 # See https://github.com/aio-libs/aiohttp/issues/6626
# Without this aiohttp will mangle the redirect header from Shibboleth, invalidating the # Without this aiohttp will mangle the redirect header from Shibboleth, invalidating the
# passed signature. Shibboleth will not accept the broken signature and authentication will # passed signature. Shibboleth will not accept the broken signature and authentication will
# fail. # fail.
requote_redirect_url=False, requote_redirect_url=False
) as session: ) as session:
self.session = session self.session = session
try: try:
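
`_request_resource_version` above issues a HEAD request and reads the `ETag` and `Last-Modified` headers so the crawler can decide whether a file has changed. A standalone sketch of that pattern with aiohttp; the URL and the throwaway session are illustrative, the real crawler reuses its configured session and timeouts:

```
import asyncio
from datetime import datetime
from email.utils import parsedate_to_datetime
from typing import Optional, Tuple

import aiohttp

async def request_resource_version(url: str) -> Tuple[Optional[str], Optional[datetime]]:
    async with aiohttp.ClientSession() as session:
        async with session.head(url, allow_redirects=True) as resp:
            etag = resp.headers.get("ETag")
            last_modified: Optional[datetime] = None
            if value := resp.headers.get("Last-Modified"):
                try:
                    last_modified = parsedate_to_datetime(value)
                except (TypeError, ValueError):
                    pass  # malformed header; treat as unknown
            return etag, last_modified

# Example usage (placeholder URL):
# print(asyncio.run(request_resource_version("https://example.org/file.pdf")))
```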

View File

@@ -1,9 +1,5 @@
-from .kit_ilias_web_crawler import (
-    IliasWebCrawler,
-    IliasWebCrawlerSection,
-    KitIliasWebCrawler,
-    KitIliasWebCrawlerSection,
-)
+from .kit_ilias_web_crawler import (IliasWebCrawler, IliasWebCrawlerSection, KitIliasWebCrawler,
+                                    KitIliasWebCrawlerSection)
 
 __all__ = [
     "IliasWebCrawler",

View File

@@ -1,6 +1,5 @@
 import asyncio
-from collections.abc import Callable
-from typing import Any, Optional
+from typing import Any, Callable, Optional
 
 import aiohttp
@@ -16,9 +15,9 @@ def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Calla
             try:
                 return await f(*args, **kwargs)
             except aiohttp.ContentTypeError:  # invalid content type
-                raise CrawlWarning("ILIAS returned an invalid content type") from None
+                raise CrawlWarning("ILIAS returned an invalid content type")
             except aiohttp.TooManyRedirects:
-                raise CrawlWarning("Got stuck in a redirect loop") from None
+                raise CrawlWarning("Got stuck in a redirect loop")
             except aiohttp.ClientPayloadError as e:  # encoding or not enough bytes
                 last_exception = e
             except aiohttp.ClientConnectionError as e:  # e.g. timeout, disconnect, resolve failed, etc.
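
`_iorepeat` wraps crawl functions so transient aiohttp errors are retried a fixed number of times while clearly fatal ones become a `CrawlWarning` immediately. A simplified, self-contained sketch of that retry shape; the real decorator distinguishes more error classes and honours `failure_is_error`:

```
import asyncio
import functools
from typing import Any, Callable, Optional

class CrawlWarning(Exception):
    pass

def iorepeat(attempts: int, name: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
        @functools.wraps(f)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            last_exception: Optional[Exception] = None
            for _ in range(attempts):
                try:
                    return await f(*args, **kwargs)
                except ConnectionError as e:  # stand-in for aiohttp's transient errors
                    last_exception = e
                await asyncio.sleep(0)  # back-off omitted for brevity
            raise CrawlWarning(f"Error while {name}") from last_exception
        return wrapper
    return decorator

@iorepeat(attempts=3, name="crawling demo page")
async def flaky() -> str:
    raise ConnectionError("simulated network hiccup")

# asyncio.run(flaky())  # raises CrawlWarning after three attempts
```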

View File

@@ -1,7 +1,5 @@
import dataclasses
import re
from enum import Enum from enum import Enum
from typing import Optional, cast from typing import Optional
import bs4 import bs4
@@ -14,9 +12,7 @@ _link_template_fancy = """
<head> <head>
<meta charset="UTF-8"> <meta charset="UTF-8">
<title>ILIAS - Link: {{name}}</title> <title>ILIAS - Link: {{name}}</title>
<!-- REPEAT REMOVE START -->
<meta http-equiv = "refresh" content = "{{redirect_delay}}; url = {{link}}" /> <meta http-equiv = "refresh" content = "{{redirect_delay}}; url = {{link}}" />
<!-- REPEAT REMOVE END -->
</head> </head>
<style> <style>
@@ -27,8 +23,6 @@ _link_template_fancy = """
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: center; justify-content: center;
flex-direction: column;
gap: 4px;
} }
body { body {
padding: 0; padding: 0;
@@ -37,15 +31,10 @@ _link_template_fancy = """
font-family: "Open Sans", Verdana, Arial, Helvetica, sans-serif; font-family: "Open Sans", Verdana, Arial, Helvetica, sans-serif;
height: 100vh; height: 100vh;
} }
.column {
min-width: 500px;
max-width: 90vw;
display: flex;
flex-direction: column;
row-gap: 5px;
}
.row { .row {
background-color: white; background-color: white;
min-width: 500px;
max-width: 90vw;
display: flex; display: flex;
padding: 1em; padding: 1em;
} }
@@ -86,23 +75,19 @@ _link_template_fancy = """
} }
</style> </style>
<body class="center-flex"> <body class="center-flex">
<div class="column"> <div class="row">
<!-- REPEAT START --> <div class="logo center-flex">
<div class="row"> <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<div class="logo center-flex"> <path d="M12 0c-6.627 0-12 5.373-12 12s5.373 12 12 12 12-5.373 12-12-5.373-12-12-12zm9.567 9.098c-.059-.058-.127-.108-.206-.138-.258-.101-1.35.603-1.515.256-.108-.231-.327.148-.578.008-.121-.067-.459-.52-.611-.465-.312.112.479.974.694 1.087.203-.154.86-.469 1.002-.039.271.812-.745 1.702-1.264 2.171-.775.702-.63-.454-1.159-.86-.277-.213-.274-.667-.555-.824-.125-.071-.7-.732-.694-.821l-.017.167c-.095.072-.297-.27-.319-.325 0 .298.485.772.646 1.011.273.409.42 1.005.756 1.339.179.18.866.923 1.045.908l.921-.437c.649.154-1.531 3.237-1.738 3.619-.171.321.139 1.112.114 1.49-.029.437-.374.579-.7.817-.35.255-.268.752-.562.934-.521.321-.897 1.366-1.639 1.361-.219-.001-1.151.364-1.273.007-.095-.258-.223-.455-.356-.71-.131-.25-.015-.51-.175-.731-.11-.154-.479-.502-.513-.684-.002-.157.118-.632.283-.715.231-.118.044-.462.016-.663-.048-.357-.27-.652-.535-.859-.393-.302-.189-.542-.098-.974 0-.206-.126-.476-.402-.396-.57.166-.396-.445-.812-.417-.299.021-.543.211-.821.295-.349.104-.707-.083-1.053-.126-1.421-.179-1.885-1.804-1.514-2.976.037-.192-.115-.547-.048-.696.159-.352.485-.752.768-1.021.16-.152.365-.113.553-.231.29-.182.294-.558.578-.789.404-.328.956-.321 1.482-.392.281-.037 1.35-.268 1.518-.06 0 .039.193.611-.019.578.438.023 1.061.756 1.476.585.213-.089.135-.744.573-.427.265.19 1.45.275 1.696.07.152-.125.236-.939.053-1.031.117.116-.618.125-.686.099-.122-.044-.235.115-.43.025.117.055-.651-.358-.22-.674-.181.132-.349-.037-.544.109-.135.109.062.181-.13.277-.305.155-.535-.53-.649-.607-.118-.077-1.024-.713-.777-.298l.797.793c-.04.026-.209-.289-.209-.059.053-.136.02.585-.105.35-.056-.09.091-.14.006-.271 0-.085-.23-.169-.275-.228-.126-.157-.462-.502-.644-.585-.05-.024-.771.088-.832.111-.071.099-.131.203-.181.314-.149.055-.29.127-.423.216l-.159.356c-.068.061-.772.294-.776.303.03-.076-.492-.172-.457-.324.038-.167.215-.687.169-.877-.048-.199 1.085.287 1.158-.238.029-.227.047-.492-.316-.531.069.008.702-.249.807-.364.148-.169.486-.447.731-.447.286 0 .225-.417.356-.622.133.053-.071.38.088.512-.01-.104.45.057.494.033.105-.056.691-.023.601-.299-.101-.28.052-.197.183-.255-.02.008.248-.458.363-.456-.104-.089-.398.112-.516.103-.308-.024-.177-.525-.061-.672.09-.116-.246-.258-.25-.036-.006.332-.314.633-.243 1.075.109.666-.743-.161-.816-.115-.283.172-.515-.216-.368-.449.149-.238.51-.226.659-.48.104-.179.227-.389.388-.524.541-.454.689-.091 1.229-.042.526.048.178.125.105.327-.07.192.289.261.413.1.071-.092.232-.326.301-.499.07-.175.578-.2.527-.365 2.72 1.148 4.827 3.465 5.694 6.318zm-11.113-3.779l.068-.087.073-.019c.042-.034.086-.118.151-.104.043.009.146.095.111.148-.037.054-.066-.049-.081.101-.018.169-.188.167-.313.222-.087.037-.175-.018-.09-.104l.088-.108-.007-.049zm.442.245c.046-.045.138-.008.151-.094.014-.084.078-.178-.008-.335-.022-.042.116-.082.051-.137l-.109.032s.155-.668.364-.366l-.089.103c.135.134.172.47.215.687.127.066.324.078.098.192.117-.02-.618.314-.715.178-.072-.083.317-.139.307-.173-.004-.011-.317-.02-.265-.087zm1.43-3.547l-.356.326c-.36.298-1.28.883-1.793.705-.524-.18-1.647.667-1.826.673-.067.003.002-.641.36-.689-.141.021.993-.575 1.185-.805.678-.146 1.381-.227 2.104-.227l.326.017zm-5.086 1.19c.07.082.278.092-.026.288-.183.11-.377.809-.548.809-.51.223-.542-.439-1.109.413-.078.115-.395.158-.644.236.685-.688 1.468-1.279 2.327-1.746zm-5.24 8.793c0-.541.055-1.068.139-1.586l.292.185c.113.135.113.719.169.911.139.482.484.751.748 1.19.155.261.414.923.332 1.197.109-.179 1.081.824 1.259 1.033.418.492.74 1.088.061 1.574-.219.158.334 1.14.049 
1.382l-.365.094c-.225.138-.235.397-.166.631-1.562-1.765-2.518-4.076-2.518-6.611zm14.347-5.823c.083-.01-.107.167-.107.167.033.256.222.396.581.527.437.157.038.455-.213.385-.139-.039-.854-.255-.879.025 0 .167-.679.001-.573-.175.073-.119.05-.387.186-.562.193-.255.38-.116.386.032-.001.394.398-.373.619-.399z"/>
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24"> </svg>
<path d="M12 0c-6.627 0-12 5.373-12 12s5.373 12 12 12 12-5.373 12-12-5.373-12-12-12zm9.567 9.098c-.059-.058-.127-.108-.206-.138-.258-.101-1.35.603-1.515.256-.108-.231-.327.148-.578.008-.121-.067-.459-.52-.611-.465-.312.112.479.974.694 1.087.203-.154.86-.469 1.002-.039.271.812-.745 1.702-1.264 2.171-.775.702-.63-.454-1.159-.86-.277-.213-.274-.667-.555-.824-.125-.071-.7-.732-.694-.821l-.017.167c-.095.072-.297-.27-.319-.325 0 .298.485.772.646 1.011.273.409.42 1.005.756 1.339.179.18.866.923 1.045.908l.921-.437c.649.154-1.531 3.237-1.738 3.619-.171.321.139 1.112.114 1.49-.029.437-.374.579-.7.817-.35.255-.268.752-.562.934-.521.321-.897 1.366-1.639 1.361-.219-.001-1.151.364-1.273.007-.095-.258-.223-.455-.356-.71-.131-.25-.015-.51-.175-.731-.11-.154-.479-.502-.513-.684-.002-.157.118-.632.283-.715.231-.118.044-.462.016-.663-.048-.357-.27-.652-.535-.859-.393-.302-.189-.542-.098-.974 0-.206-.126-.476-.402-.396-.57.166-.396-.445-.812-.417-.299.021-.543.211-.821.295-.349.104-.707-.083-1.053-.126-1.421-.179-1.885-1.804-1.514-2.976.037-.192-.115-.547-.048-.696.159-.352.485-.752.768-1.021.16-.152.365-.113.553-.231.29-.182.294-.558.578-.789.404-.328.956-.321 1.482-.392.281-.037 1.35-.268 1.518-.06 0 .039.193.611-.019.578.438.023 1.061.756 1.476.585.213-.089.135-.744.573-.427.265.19 1.45.275 1.696.07.152-.125.236-.939.053-1.031.117.116-.618.125-.686.099-.122-.044-.235.115-.43.025.117.055-.651-.358-.22-.674-.181.132-.349-.037-.544.109-.135.109.062.181-.13.277-.305.155-.535-.53-.649-.607-.118-.077-1.024-.713-.777-.298l.797.793c-.04.026-.209-.289-.209-.059.053-.136.02.585-.105.35-.056-.09.091-.14.006-.271 0-.085-.23-.169-.275-.228-.126-.157-.462-.502-.644-.585-.05-.024-.771.088-.832.111-.071.099-.131.203-.181.314-.149.055-.29.127-.423.216l-.159.356c-.068.061-.772.294-.776.303.03-.076-.492-.172-.457-.324.038-.167.215-.687.169-.877-.048-.199 1.085.287 1.158-.238.029-.227.047-.492-.316-.531.069.008.702-.249.807-.364.148-.169.486-.447.731-.447.286 0 .225-.417.356-.622.133.053-.071.38.088.512-.01-.104.45.057.494.033.105-.056.691-.023.601-.299-.101-.28.052-.197.183-.255-.02.008.248-.458.363-.456-.104-.089-.398.112-.516.103-.308-.024-.177-.525-.061-.672.09-.116-.246-.258-.25-.036-.006.332-.314.633-.243 1.075.109.666-.743-.161-.816-.115-.283.172-.515-.216-.368-.449.149-.238.51-.226.659-.48.104-.179.227-.389.388-.524.541-.454.689-.091 1.229-.042.526.048.178.125.105.327-.07.192.289.261.413.1.071-.092.232-.326.301-.499.07-.175.578-.2.527-.365 2.72 1.148 4.827 3.465 5.694 6.318zm-11.113-3.779l.068-.087.073-.019c.042-.034.086-.118.151-.104.043.009.146.095.111.148-.037.054-.066-.049-.081.101-.018.169-.188.167-.313.222-.087.037-.175-.018-.09-.104l.088-.108-.007-.049zm.442.245c.046-.045.138-.008.151-.094.014-.084.078-.178-.008-.335-.022-.042.116-.082.051-.137l-.109.032s.155-.668.364-.366l-.089.103c.135.134.172.47.215.687.127.066.324.078.098.192.117-.02-.618.314-.715.178-.072-.083.317-.139.307-.173-.004-.011-.317-.02-.265-.087zm1.43-3.547l-.356.326c-.36.298-1.28.883-1.793.705-.524-.18-1.647.667-1.826.673-.067.003.002-.641.36-.689-.141.021.993-.575 1.185-.805.678-.146 1.381-.227 2.104-.227l.326.017zm-5.086 1.19c.07.082.278.092-.026.288-.183.11-.377.809-.548.809-.51.223-.542-.439-1.109.413-.078.115-.395.158-.644.236.685-.688 1.468-1.279 2.327-1.746zm-5.24 8.793c0-.541.055-1.068.139-1.586l.292.185c.113.135.113.719.169.911.139.482.484.751.748 1.19.155.261.414.923.332 1.197.109-.179 1.081.824 1.259 1.033.418.492.74 1.088.061 1.574-.219.158.334 1.14.049 
1.382l-.365.094c-.225.138-.235.397-.166.631-1.562-1.765-2.518-4.076-2.518-6.611zm14.347-5.823c.083-.01-.107.167-.107.167.033.256.222.396.581.527.437.157.038.455-.213.385-.139-.039-.854-.255-.879.025 0 .167-.679.001-.573-.175.073-.119.05-.387.186-.562.193-.255.38-.116.386.032-.001.394.398-.373.619-.399z"/>
</svg>
</div>
<div class="tile">
<div class="top-row">
<a href="{{link}}">{{name}}</a>
</div>
<div class="bottom-row">{{description}}</div>
</div>
<div class="menu-button center-flex"> ⯆ </div>
</div> </div>
<!-- REPEAT END --> <div class="tile">
<div class="top-row">
<a href="{{link}}">{{name}}</a>
</div>
<div class="bottom-row">{{description}}</div>
</div>
<div class="menu-button center-flex"> ⯆ </div>
</div> </div>
</body> </body>
</html> </html>
@@ -111,7 +96,6 @@ _link_template_fancy = """
_link_template_internet_shortcut = """ _link_template_internet_shortcut = """
[InternetShortcut] [InternetShortcut]
URL={{link}} URL={{link}}
Desc={{description}}
""".strip() """.strip()
_learning_module_template = """ _learning_module_template = """
@@ -142,88 +126,6 @@ _learning_module_template = """
</html> </html>
""" """
_forum_thread_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>ILIAS - Forum: {{name}}</title>
<style>
* {
box-sizing: border-box;
}
body {
font-family: 'Open Sans', Verdana, Arial, Helvetica, sans-serif;
padding: 8px;
}
ul, ol, p {
margin: 1.2em 0;
}
p {
margin-top: 8px;
margin-bottom: 8px;
}
a {
color: #00876c;
text-decoration: none;
cursor: pointer;
}
a:hover {
text-decoration: underline;
}
body > p:first-child > span:first-child {
font-size: 1.6em;
}
body > p:first-child > span:first-child ~ span.default {
display: inline-block;
font-size: 1.2em;
padding-bottom: 8px;
}
.ilFrmPostContent {
margin-top: 8px;
max-width: 64em;
}
.ilFrmPostContent > *:first-child {
margin-top: 0px;
}
.ilFrmPostTitle {
margin-top: 24px;
color: #00876c;
font-weight: bold;
}
#ilFrmPostList {
list-style: none;
padding-left: 0;
}
li.ilFrmPostRow {
padding: 3px 0 3px 3px;
margin-bottom: 24px;
border-left: 6px solid #dddddd;
}
.ilFrmPostRow > div {
display: flex;
}
.ilFrmPostImage img {
margin: 0 !important;
padding: 6px 9px 9px 6px;
}
.ilUserIcon {
width: 115px;
}
.small {
text-decoration: none;
font-size: 0.75rem;
color: #6f6f6f;
}
</style>
</head>
<body>
{{heading}}
{{content}}
</body>
</html>
""".strip() # noqa: E501 line too long
def learning_module_template(body: bs4.Tag, name: str, prev: Optional[str], next: Optional[str]) -> str: def learning_module_template(body: bs4.Tag, name: str, prev: Optional[str], next: Optional[str]) -> str:
# Seems to be comments, ignore those. # Seems to be comments, ignore those.
@@ -237,13 +139,13 @@ def learning_module_template(body: bs4.Tag, name: str, prev: Optional[str], next
</div> </div>
""" """
if prev and body.select_one(".ilc_page_lnav_LeftNavigation"): if prev and body.select_one(".ilc_page_lnav_LeftNavigation"):
text = cast(bs4.Tag, body.select_one(".ilc_page_lnav_LeftNavigation")).get_text().strip() text = body.select_one(".ilc_page_lnav_LeftNavigation").getText().strip()
left = f'<a href="{prev}">{text}</a>' left = f'<a href="{prev}">{text}</a>'
else: else:
left = "<span></span>" left = "<span></span>"
if next and body.select_one(".ilc_page_rnav_RightNavigation"): if next and body.select_one(".ilc_page_rnav_RightNavigation"):
text = cast(bs4.Tag, body.select_one(".ilc_page_rnav_RightNavigation")).get_text().strip() text = body.select_one(".ilc_page_rnav_RightNavigation").getText().strip()
right = f'<a href="{next}">{text}</a>' right = f'<a href="{next}">{text}</a>'
else: else:
right = "<span></span>" right = "<span></span>"
@@ -254,29 +156,12 @@ def learning_module_template(body: bs4.Tag, name: str, prev: Optional[str], next
) )
if bot_nav := body.select_one(".ilc_page_bnav_BottomNavigation"): if bot_nav := body.select_one(".ilc_page_bnav_BottomNavigation"):
bot_nav.replace_with( bot_nav.replace_with(soupify(nav_template.replace(
soupify(nav_template.replace("{{left}}", left).replace("{{right}}", right).encode()) "{{left}}", left).replace("{{right}}", right).encode())
) )
body_str = body.prettify() body = body.prettify()
return _learning_module_template.replace("{{body}}", body_str).replace("{{name}}", name) return _learning_module_template.replace("{{body}}", body).replace("{{name}}", name)
def forum_thread_template(name: str, url: str, heading: bs4.Tag, content: bs4.Tag) -> str:
if title := heading.find(name="b"):
title.wrap(bs4.Tag(name="a", attrs={"href": url}))
return (
_forum_thread_template.replace("{{name}}", name)
.replace("{{heading}}", heading.prettify())
.replace("{{content}}", content.prettify())
)
@dataclasses.dataclass
class LinkData:
name: str
url: str
description: str
class Links(Enum): class Links(Enum):
@@ -296,9 +181,6 @@ class Links(Enum):
return None return None
raise ValueError("Missing switch case") raise ValueError("Missing switch case")
def collection_as_one(self) -> bool:
return self == Links.FANCY
def extension(self) -> Optional[str]: def extension(self) -> Optional[str]:
if self == Links.FANCY: if self == Links.FANCY:
return ".html" return ".html"
@@ -310,47 +192,10 @@ class Links(Enum):
return None return None
raise ValueError("Missing switch case") raise ValueError("Missing switch case")
def interpolate(self, redirect_delay: int, collection_name: str, links: list[LinkData]) -> str:
template = self.template()
if template is None:
raise ValueError("Cannot interpolate ignored links")
if len(links) == 1:
link = links[0]
content = template
content = content.replace("{{link}}", link.url)
content = content.replace("{{name}}", link.name)
content = content.replace("{{description}}", link.description)
content = content.replace("{{redirect_delay}}", str(redirect_delay))
return content
if self == Links.PLAINTEXT or self == Links.INTERNET_SHORTCUT:
return "\n".join(f"{link.url}" for link in links)
# All others get coerced to fancy
content = cast(str, Links.FANCY.template())
repeated_content = cast(
re.Match[str], re.search(r"<!-- REPEAT START -->([\s\S]+)<!-- REPEAT END -->", content)
).group(1)
parts = []
for link in links:
instance = repeated_content
instance = instance.replace("{{link}}", link.url)
instance = instance.replace("{{name}}", link.name)
instance = instance.replace("{{description}}", link.description)
instance = instance.replace("{{redirect_delay}}", str(redirect_delay))
parts.append(instance)
content = content.replace(repeated_content, "\n".join(parts))
content = content.replace("{{name}}", collection_name)
content = re.sub(r"<!-- REPEAT REMOVE START -->[\s\S]+<!-- REPEAT REMOVE END -->", "", content)
return content
@staticmethod @staticmethod
def from_string(string: str) -> "Links": def from_string(string: str) -> "Links":
try: try:
return Links(string) return Links(string)
except ValueError: except ValueError:
options = [f"'{option.value}'" for option in Links] raise ValueError("must be one of 'ignore', 'plaintext',"
raise ValueError(f"must be one of {', '.join(options)}") from None " 'html', 'internet-shortcut'")


@@ -1,5 +1,3 @@
from typing import cast
from bs4 import BeautifulSoup, Comment, Tag from bs4 import BeautifulSoup, Comment, Tag
_STYLE_TAG_CONTENT = """ _STYLE_TAG_CONTENT = """
@@ -39,10 +37,6 @@ _STYLE_TAG_CONTENT = """
margin: 0.5rem 0; margin: 0.5rem 0;
} }
img {
background-color: white;
}
body { body {
padding: 1em; padding: 1em;
grid-template-columns: 1fr min(60rem, 90%) 1fr; grid-template-columns: 1fr min(60rem, 90%) 1fr;
@@ -60,11 +54,12 @@ _ARTICLE_WORTHY_CLASSES = [
def insert_base_markup(soup: BeautifulSoup) -> BeautifulSoup: def insert_base_markup(soup: BeautifulSoup) -> BeautifulSoup:
head = soup.new_tag("head") head = soup.new_tag("head")
soup.insert(0, head) soup.insert(0, head)
# Force UTF-8 encoding
head.append(soup.new_tag("meta", charset="utf-8"))
simplecss_link: Tag = soup.new_tag("link")
# <link rel="stylesheet" href="https://cdn.simplecss.org/simple.css"> # <link rel="stylesheet" href="https://cdn.simplecss.org/simple.css">
head.append(soup.new_tag("link", rel="stylesheet", href="https://cdn.simplecss.org/simple.css")) simplecss_link["rel"] = "stylesheet"
simplecss_link["href"] = "https://cdn.simplecss.org/simple.css"
head.append(simplecss_link)
# Basic style tags for compat # Basic style tags for compat
style: Tag = soup.new_tag("style") style: Tag = soup.new_tag("style")
@@ -75,18 +70,18 @@ def insert_base_markup(soup: BeautifulSoup) -> BeautifulSoup:
def clean(soup: BeautifulSoup) -> BeautifulSoup: def clean(soup: BeautifulSoup) -> BeautifulSoup:
for block in cast(list[Tag], soup.find_all(class_=lambda x: x in _ARTICLE_WORTHY_CLASSES)): for block in soup.find_all(class_=lambda x: x in _ARTICLE_WORTHY_CLASSES):
block.name = "article" block.name = "article"
for block in cast(list[Tag], soup.find_all("h3")): for block in soup.find_all("h3"):
block.name = "div" block.name = "div"
for block in cast(list[Tag], soup.find_all("h1")): for block in soup.find_all("h1"):
block.name = "h3" block.name = "h3"
for block in cast(list[Tag], soup.find_all(class_="ilc_va_ihcap_VAccordIHeadCap")): for block in soup.find_all(class_="ilc_va_ihcap_VAccordIHeadCap"):
block.name = "h3" block.name = "h3"
block["class"] += ["accordion-head"] # type: ignore block["class"] += ["accordion-head"]
for dummy in soup.select(".ilc_text_block_Standard.ilc_Paragraph"): for dummy in soup.select(".ilc_text_block_Standard.ilc_Paragraph"):
children = list(dummy.children) children = list(dummy.children)
@@ -102,7 +97,7 @@ def clean(soup: BeautifulSoup) -> BeautifulSoup:
if figure := video.find_parent("figure"): if figure := video.find_parent("figure"):
figure.decompose() figure.decompose()
for hrule_imposter in cast(list[Tag], soup.find_all(class_="ilc_section_Separator")): for hrule_imposter in soup.find_all(class_="ilc_section_Separator"):
hrule_imposter.insert(0, soup.new_tag("hr")) hrule_imposter.insert(0, soup.new_tag("hr"))
return soup return soup


@@ -4,7 +4,7 @@ import os
import re import re
from collections.abc import Awaitable, Coroutine from collections.abc import Awaitable, Coroutine
from pathlib import PurePath from pathlib import PurePath
from typing import Any, Literal, Optional, cast from typing import Any, Dict, List, Literal, Optional, Set, Union, cast
from urllib.parse import urljoin from urllib.parse import urljoin
import aiohttp import aiohttp
@@ -15,24 +15,17 @@ from ...auth import Authenticator
from ...config import Config from ...config import Config
from ...logging import ProgressBar, log from ...logging import ProgressBar, log
from ...output_dir import FileSink, Redownload from ...output_dir import FileSink, Redownload
from ...utils import fmt_path, sanitize_path_name, soupify, url_set_query_param from ...utils import fmt_path, soupify, url_set_query_param
from ..crawler import CrawlError, CrawlToken, CrawlWarning, DownloadToken, anoncritical from ..crawler import CrawlError, CrawlToken, CrawlWarning, DownloadToken, anoncritical
from ..http_crawler import HttpCrawler, HttpCrawlerSection from ..http_crawler import HttpCrawler, HttpCrawlerSection
from .async_helper import _iorepeat from .async_helper import _iorepeat
from .file_templates import LinkData, Links, forum_thread_template, learning_module_template from .file_templates import Links, learning_module_template
from .ilias_html_cleaner import clean, insert_base_markup from .ilias_html_cleaner import clean, insert_base_markup
from .kit_ilias_html import ( from .kit_ilias_html import (IliasElementType, IliasForumThread, IliasLearningModulePage, IliasPage,
IliasElementType, IliasPageElement, _sanitize_path_name, parse_ilias_forum_export)
IliasForumThread,
IliasLearningModulePage,
IliasPage,
IliasPageElement,
IliasSoup,
parse_ilias_forum_export,
)
from .shibboleth_login import ShibbolethLogin from .shibboleth_login import ShibbolethLogin
TargetType = str | int TargetType = Union[str, int]
class LoginTypeLocal: class LoginTypeLocal:
@@ -48,7 +41,7 @@ class IliasWebCrawlerSection(HttpCrawlerSection):
return base_url return base_url
def login(self) -> Literal["shibboleth"] | LoginTypeLocal: def login(self) -> Union[Literal["shibboleth"], LoginTypeLocal]:
login_type = self.s.get("login_type") login_type = self.s.get("login_type")
if not login_type: if not login_type:
self.missing_value("login_type") self.missing_value("login_type")
@@ -62,7 +55,9 @@ class IliasWebCrawlerSection(HttpCrawlerSection):
self.invalid_value("login_type", login_type, "Should be <shibboleth | local>") self.invalid_value("login_type", login_type, "Should be <shibboleth | local>")
def tfa_auth(self, authenticators: dict[str, Authenticator]) -> Optional[Authenticator]: def tfa_auth(
self, authenticators: Dict[str, Authenticator]
) -> Optional[Authenticator]:
value: Optional[str] = self.s.get("tfa_auth") value: Optional[str] = self.s.get("tfa_auth")
if value is None: if value is None:
return None return None
@@ -109,10 +104,10 @@ class IliasWebCrawlerSection(HttpCrawlerSection):
return self.s.getboolean("forums", fallback=False) return self.s.getboolean("forums", fallback=False)
_DIRECTORY_PAGES: set[IliasElementType] = { _DIRECTORY_PAGES: Set[IliasElementType] = {
IliasElementType.COURSE,
IliasElementType.EXERCISE, IliasElementType.EXERCISE,
IliasElementType.EXERCISE_FILES, IliasElementType.EXERCISE_FILES,
IliasElementType.EXERCISE_OVERVIEW,
IliasElementType.FOLDER, IliasElementType.FOLDER,
IliasElementType.INFO_TAB, IliasElementType.INFO_TAB,
IliasElementType.MEDIACAST_VIDEO_FOLDER, IliasElementType.MEDIACAST_VIDEO_FOLDER,
@@ -121,7 +116,7 @@ _DIRECTORY_PAGES: set[IliasElementType] = {
IliasElementType.OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED, IliasElementType.OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED,
} }
_VIDEO_ELEMENTS: set[IliasElementType] = { _VIDEO_ELEMENTS: Set[IliasElementType] = {
IliasElementType.MEDIACAST_VIDEO, IliasElementType.MEDIACAST_VIDEO,
IliasElementType.MEDIACAST_VIDEO_FOLDER, IliasElementType.MEDIACAST_VIDEO_FOLDER,
IliasElementType.OPENCAST_VIDEO, IliasElementType.OPENCAST_VIDEO,
@@ -171,19 +166,17 @@ class IliasWebCrawler(HttpCrawler):
name: str, name: str,
section: IliasWebCrawlerSection, section: IliasWebCrawlerSection,
config: Config, config: Config,
authenticators: dict[str, Authenticator], authenticators: Dict[str, Authenticator]
): ):
# Setting a main authenticator for cookie sharing # Setting a main authenticator for cookie sharing
auth = section.auth(authenticators) auth = section.auth(authenticators)
super().__init__(name, section, config, shared_auth=auth) super().__init__(name, section, config, shared_auth=auth)
if section.tasks() > 1: if section.tasks() > 1:
log.warn( log.warn("""
"""
Please avoid using too many parallel requests as these are the KIT ILIAS Please avoid using too many parallel requests as these are the KIT ILIAS
instance's greatest bottleneck. instance's greatest bottleneck.
""".strip() """.strip())
)
self._auth = auth self._auth = auth
self._base_url = section.base_url() self._base_url = section.base_url()
@@ -200,7 +193,7 @@ instance's greatest bottleneck.
self._links = section.links() self._links = section.links()
self._videos = section.videos() self._videos = section.videos()
self._forums = section.forums() self._forums = section.forums()
self._visited_urls: dict[str, PurePath] = dict() self._visited_urls: Dict[str, PurePath] = dict()
async def _run(self) -> None: async def _run(self) -> None:
if isinstance(self._target, int): if isinstance(self._target, int):
@@ -217,23 +210,18 @@ instance's greatest bottleneck.
# Start crawling at the given course # Start crawling at the given course
root_url = url_set_query_param( root_url = url_set_query_param(
urljoin(self._base_url + "/", "goto.php"), urljoin(self._base_url + "/", "goto.php"),
"target", "target", f"crs_{course_id}",
f"crs_{course_id}",
) )
await self._crawl_url(root_url, expected_id=course_id) await self._crawl_url(root_url, expected_id=course_id)
async def _crawl_desktop(self) -> None: async def _crawl_desktop(self) -> None:
await self._crawl_url( await self._crawl_url(
urljoin(self._base_url, "/ilias.php?baseClass=ilDashboardGUI&cmd=show"), crawl_nested_courses=True urljoin(self._base_url, "/ilias.php?baseClass=ilDashboardGUI&cmd=show")
) )
async def _crawl_url( async def _crawl_url(self, url: str, expected_id: Optional[int] = None) -> None:
self, url: str, expected_id: Optional[int] = None, crawl_nested_courses: bool = False if awaitable := await self._handle_ilias_page(url, None, PurePath("."), expected_id):
) -> None:
if awaitable := await self._handle_ilias_page(
url, None, PurePath("."), expected_id, crawl_nested_courses
):
await awaitable await awaitable
async def _handle_ilias_page( async def _handle_ilias_page(
@@ -242,7 +230,6 @@ instance's greatest bottleneck.
current_element: Optional[IliasPageElement], current_element: Optional[IliasPageElement],
path: PurePath, path: PurePath,
expected_course_id: Optional[int] = None, expected_course_id: Optional[int] = None,
crawl_nested_courses: bool = False,
) -> Optional[Coroutine[Any, Any, None]]: ) -> Optional[Coroutine[Any, Any, None]]:
maybe_cl = await self.crawl(path) maybe_cl = await self.crawl(path)
if not maybe_cl: if not maybe_cl:
@@ -250,9 +237,7 @@ instance's greatest bottleneck.
if current_element: if current_element:
self._ensure_not_seen(current_element, path) self._ensure_not_seen(current_element, path)
return self._crawl_ilias_page( return self._crawl_ilias_page(url, current_element, maybe_cl, expected_course_id)
url, current_element, maybe_cl, expected_course_id, crawl_nested_courses
)
@anoncritical @anoncritical
async def _crawl_ilias_page( async def _crawl_ilias_page(
@@ -261,11 +246,10 @@ instance's greatest bottleneck.
current_element: Optional[IliasPageElement], current_element: Optional[IliasPageElement],
cl: CrawlToken, cl: CrawlToken,
expected_course_id: Optional[int] = None, expected_course_id: Optional[int] = None,
crawl_nested_courses: bool = False,
) -> None: ) -> None:
elements: list[IliasPageElement] = [] elements: List[IliasPageElement] = []
# A list as variable redefinitions are not propagated to outer scopes # A list as variable redefinitions are not propagated to outer scopes
description: list[BeautifulSoup] = [] description: List[BeautifulSoup] = []
@_iorepeat(3, "crawling folder") @_iorepeat(3, "crawling folder")
async def gather_elements() -> None: async def gather_elements() -> None:
@@ -273,7 +257,6 @@ instance's greatest bottleneck.
async with cl: async with cl:
next_stage_url: Optional[str] = url next_stage_url: Optional[str] = url
current_parent = current_element current_parent = current_element
page = None
while next_stage_url: while next_stage_url:
soup = await self._get_page(next_stage_url) soup = await self._get_page(next_stage_url)
@@ -283,22 +266,19 @@ instance's greatest bottleneck.
# If we expect to find a root course, enforce it # If we expect to find a root course, enforce it
if current_parent is None and expected_course_id is not None: if current_parent is None and expected_course_id is not None:
perma_link = IliasPage.get_soup_permalink(soup) perma_link = IliasPage.get_soup_permalink(soup)
if not perma_link or "crs/" not in perma_link: if not perma_link or "crs_" not in perma_link:
raise CrawlError("Invalid course id? Didn't find anything looking like a course") raise CrawlError("Invalid course id? Didn't find anything looking like a course")
if str(expected_course_id) not in perma_link: if str(expected_course_id) not in perma_link:
raise CrawlError(f"Expected course id {expected_course_id} but got {perma_link}") raise CrawlError(f"Expected course id {expected_course_id} but got {perma_link}")
page = IliasPage(soup, current_parent) page = IliasPage(soup, next_stage_url, current_parent)
if next_element := page.get_next_stage_element(): if next_element := page.get_next_stage_element():
current_parent = next_element current_parent = next_element
next_stage_url = next_element.url next_stage_url = next_element.url
else: else:
next_stage_url = None next_stage_url = None
page = cast(IliasPage, page)
elements.extend(page.get_child_elements()) elements.extend(page.get_child_elements())
if current_element is None and (info_tab := page.get_info_tab()):
elements.append(info_tab)
if description_string := page.get_description(): if description_string := page.get_description():
description.append(description_string) description.append(description_string)
@@ -310,9 +290,9 @@ instance's greatest bottleneck.
elements.sort(key=lambda e: e.id()) elements.sort(key=lambda e: e.id())
tasks: list[Awaitable[None]] = [] tasks: List[Awaitable[None]] = []
for element in elements: for element in elements:
if handle := await self._handle_ilias_element(cl.path, element, crawl_nested_courses): if handle := await self._handle_ilias_element(cl.path, element):
tasks.append(asyncio.create_task(handle)) tasks.append(asyncio.create_task(handle))
# And execute them # And execute them
@@ -325,30 +305,24 @@ instance's greatest bottleneck.
# works correctly. # works correctly.
@anoncritical @anoncritical
async def _handle_ilias_element( async def _handle_ilias_element(
self, parent_path: PurePath, element: IliasPageElement, crawl_nested_courses: bool = False self,
parent_path: PurePath,
element: IliasPageElement,
) -> Optional[Coroutine[Any, Any, None]]: ) -> Optional[Coroutine[Any, Any, None]]:
# element.name might contain `/` if the crawler created nested elements, # element.name might contain `/` if the crawler created nested elements,
# so we can not sanitize it here. We trust in the output dir to thwart worst-case # so we can not sanitize it here. We trust in the output dir to thwart worst-case
# directory escape attacks. # directory escape attacks.
element_path = PurePath(parent_path, element.name) element_path = PurePath(parent_path, element.name)
# This is symptomatic of no access to the element, for example, because if element.type in _VIDEO_ELEMENTS:
# of time availability restrictions. if not self._videos:
if "cmdClass=ilInfoScreenGUI" in element.url and "cmd=showSummary" in element.url: log.status(
log.explain( "[bold bright_black]",
"Skipping element as url points to info screen, " "Ignored",
"this should only happen with not-yet-released elements" fmt_path(element_path),
) "[bright_black](enable with option 'videos')"
return None )
return None
if element.type in _VIDEO_ELEMENTS and not self._videos:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](enable with option 'videos')",
)
return None
if element.type == IliasElementType.FILE: if element.type == IliasElementType.FILE:
return await self._handle_file(element, element_path) return await self._handle_file(element, element_path)
@@ -358,7 +332,7 @@ instance's greatest bottleneck.
"[bold bright_black]", "[bold bright_black]",
"Ignored", "Ignored",
fmt_path(element_path), fmt_path(element_path),
"[bright_black](enable with option 'forums')", "[bright_black](enable with option 'forums')"
) )
return None return None
return await self._handle_forum(element, element_path) return await self._handle_forum(element, element_path)
@@ -367,7 +341,7 @@ instance's greatest bottleneck.
"[bold bright_black]", "[bold bright_black]",
"Ignored", "Ignored",
fmt_path(element_path), fmt_path(element_path),
"[bright_black](tests contain no relevant data)", "[bright_black](tests contain no relevant data)"
) )
return None return None
elif element.type == IliasElementType.SURVEY: elif element.type == IliasElementType.SURVEY:
@@ -375,7 +349,7 @@ instance's greatest bottleneck.
"[bold bright_black]", "[bold bright_black]",
"Ignored", "Ignored",
fmt_path(element_path), fmt_path(element_path),
"[bright_black](surveys contain no relevant data)", "[bright_black](surveys contain no relevant data)"
) )
return None return None
elif element.type == IliasElementType.SCORM_LEARNING_MODULE: elif element.type == IliasElementType.SCORM_LEARNING_MODULE:
@@ -383,73 +357,13 @@ instance's greatest bottleneck.
"[bold bright_black]", "[bold bright_black]",
"Ignored", "Ignored",
fmt_path(element_path), fmt_path(element_path),
"[bright_black](scorm learning modules are not supported)", "[bright_black](scorm learning modules are not supported)"
)
return None
elif element.type == IliasElementType.LITERATURE_LIST:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](literature lists are not currently supported)",
)
return None
elif element.type == IliasElementType.LEARNING_MODULE_HTML:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](HTML learning modules are not supported)",
)
return None
elif element.type == IliasElementType.BLOG:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](blogs are not currently supported)",
)
return None
elif element.type == IliasElementType.DCL_RECORD_LIST:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](dcl record lists are not currently supported)",
)
return None
elif element.type == IliasElementType.MEDIA_POOL:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](media pools are not currently supported)",
)
return None
elif element.type == IliasElementType.COURSE:
if crawl_nested_courses:
return await self._handle_ilias_page(element.url, element, element_path)
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](not descending into linked course)",
)
return None
elif element.type == IliasElementType.WIKI:
log.status(
"[bold bright_black]",
"Ignored",
fmt_path(element_path),
"[bright_black](wikis are not currently supported)",
) )
return None return None
elif element.type == IliasElementType.LEARNING_MODULE: elif element.type == IliasElementType.LEARNING_MODULE:
return await self._handle_learning_module(element, element_path) return await self._handle_learning_module(element, element_path)
elif element.type == IliasElementType.LINK: elif element.type == IliasElementType.LINK:
return await self._handle_link(element, element_path) return await self._handle_link(element, element_path)
elif element.type == IliasElementType.LINK_COLLECTION:
return await self._handle_link(element, element_path)
elif element.type == IliasElementType.BOOKING: elif element.type == IliasElementType.BOOKING:
return await self._handle_booking(element, element_path) return await self._handle_booking(element, element_path)
elif element.type == IliasElementType.OPENCAST_VIDEO: elif element.type == IliasElementType.OPENCAST_VIDEO:
@@ -475,93 +389,44 @@ instance's greatest bottleneck.
log.explain_topic(f"Decision: Crawl Link {fmt_path(element_path)}") log.explain_topic(f"Decision: Crawl Link {fmt_path(element_path)}")
log.explain(f"Links type is {self._links}") log.explain(f"Links type is {self._links}")
export_url = url_set_query_param(element.url, "cmd", "exportHTML") link_template_maybe = self._links.template()
resolved = await self._resolve_link_target(export_url) link_extension = self._links.extension()
if resolved == "none": if not link_template_maybe or not link_extension:
links = [LinkData(element.name, "", element.description or "")]
else:
links = self._parse_link_content(element, cast(BeautifulSoup, resolved))
maybe_extension = self._links.extension()
if not maybe_extension:
log.explain("Answer: No") log.explain("Answer: No")
return None return None
else: else:
log.explain("Answer: Yes") log.explain("Answer: Yes")
element_path = element_path.with_name(element_path.name + link_extension)
if len(links) <= 1 or self._links.collection_as_one(): maybe_dl = await self.download(element_path, mtime=element.mtime)
element_path = element_path.with_name(element_path.name + maybe_extension) if not maybe_dl:
maybe_dl = await self.download(element_path, mtime=element.mtime)
if not maybe_dl:
return None
return self._download_link(self._links, element.name, links, maybe_dl)
maybe_cl = await self.crawl(element_path)
if not maybe_cl:
return None return None
# Required for download_all closure
cl = maybe_cl
extension = maybe_extension
async def download_all() -> None: return self._download_link(element, link_template_maybe, maybe_dl)
for link in links:
path = cl.path / (sanitize_path_name(link.name) + extension)
if dl := await self.download(path, mtime=element.mtime):
await self._download_link(self._links, element.name, [link], dl)
return download_all()
@anoncritical @anoncritical
@_iorepeat(3, "resolving link") @_iorepeat(3, "resolving link")
async def _download_link( async def _download_link(self, element: IliasPageElement, link_template: str, dl: DownloadToken) -> None:
self, link_renderer: Links, collection_name: str, links: list[LinkData], dl: DownloadToken
) -> None:
async with dl as (bar, sink): async with dl as (bar, sink):
rendered = link_renderer.interpolate(self._link_file_redirect_delay, collection_name, links) export_url = element.url.replace("cmd=calldirectlink", "cmd=exportHTML")
sink.file.write(rendered.encode("utf-8")) real_url = await self._resolve_link_target(export_url)
sink.done() self._write_link_content(link_template, real_url, element.name, element.description, sink)
async def _resolve_link_target(self, export_url: str) -> BeautifulSoup | Literal["none"]: def _write_link_content(
async def impl() -> Optional[BeautifulSoup | Literal["none"]]: self,
async with self.session.get(export_url, allow_redirects=False) as resp: link_template: str,
# No redirect means we were authenticated url: str,
if hdrs.LOCATION not in resp.headers: name: str,
return soupify(await resp.read()) # .select_one("a").get("href").strip() # type: ignore description: Optional[str],
# We are either unauthenticated or the link is not active sink: FileSink,
new_url = resp.headers[hdrs.LOCATION].lower() ) -> None:
if "baseclass=illinkresourcehandlergui" in new_url and "cmd=infoscreen" in new_url: content = link_template
return "none" content = content.replace("{{link}}", url)
return None content = content.replace("{{name}}", name)
content = content.replace("{{description}}", str(description))
auth_id = await self._current_auth_id() content = content.replace("{{redirect_delay}}", str(self._link_file_redirect_delay))
target = await impl() sink.file.write(content.encode("utf-8"))
if target is not None: sink.done()
return target
await self.authenticate(auth_id)
target = await impl()
if target is not None:
return target
raise CrawlError("resolve_link_target failed even after authenticating")
@staticmethod
def _parse_link_content(element: IliasPageElement, content: BeautifulSoup) -> list[LinkData]:
links = list(content.select("a"))
if len(links) == 1:
url = str(links[0].get("href")).strip()
return [LinkData(name=element.name, description=element.description or "", url=url)]
results = []
for link in links:
url = str(link.get("href")).strip()
name = link.get_text(strip=True)
description = cast(Tag, link.find_next_sibling("dd")).get_text(strip=True)
results.append(LinkData(name=name, description=description, url=url.strip()))
return results
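As a rough illustration of the export markup that _parse_link_content above appears to expect for link collections (the sample HTML below is an assumption for demonstration, not taken from ILIAS): each entry is an <a> element whose following <dd> sibling carries the description.

from bs4 import BeautifulSoup

_SAMPLE_EXPORT = """
<div>
  <a href="https://example.com/first">First link</a>
  <dd>Description of the first link</dd>
  <a href="https://example.com/second">Second link</a>
  <dd>Description of the second link</dd>
</div>
"""

soup = BeautifulSoup(_SAMPLE_EXPORT, "html.parser")
for anchor in soup.select("a"):
    url = str(anchor.get("href")).strip()
    name = anchor.get_text(strip=True)
    # The description sits in the next <dd> sibling, mirroring find_next_sibling("dd") above
    dd = anchor.find_next_sibling("dd")
    description = dd.get_text(strip=True) if dd else ""
    print(url, name, description)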
async def _handle_booking( async def _handle_booking(
self, self,
@@ -586,7 +451,7 @@ instance's greatest bottleneck.
self._ensure_not_seen(element, element_path) self._ensure_not_seen(element, element_path)
return self._download_booking(element, maybe_dl) return self._download_booking(element, link_template_maybe, maybe_dl)
@anoncritical @anoncritical
@_iorepeat(1, "downloading description") @_iorepeat(1, "downloading description")
@@ -596,10 +461,10 @@ instance's greatest bottleneck.
if not dl: if not dl:
return return
async with dl as (_bar, sink): async with dl as (bar, sink):
description = clean(insert_base_markup(description)) description = clean(insert_base_markup(description))
description_tag = await self.internalize_images(description) description = await self.internalize_images(description)
sink.file.write(description_tag.prettify().encode("utf-8")) sink.file.write(description.prettify().encode("utf-8"))
sink.done() sink.done()
@anoncritical @anoncritical
@@ -607,13 +472,36 @@ instance's greatest bottleneck.
async def _download_booking( async def _download_booking(
self, self,
element: IliasPageElement, element: IliasPageElement,
link_template: str,
dl: DownloadToken, dl: DownloadToken,
) -> None: ) -> None:
async with dl as (bar, sink): async with dl as (bar, sink):
links = [LinkData(name=element.name, description=element.description or "", url=element.url)] self._write_link_content(link_template, element.url, element.name, element.description, sink)
rendered = self._links.interpolate(self._link_file_redirect_delay, element.name, links)
sink.file.write(rendered.encode("utf-8")) async def _resolve_link_target(self, export_url: str) -> str:
sink.done() async def impl() -> Optional[str]:
async with self.session.get(export_url, allow_redirects=False) as resp:
# No redirect means we were authenticated
if hdrs.LOCATION not in resp.headers:
return soupify(await resp.read()).select_one("a").get("href").strip()
# We are either unauthenticated or the link is not active
new_url = resp.headers[hdrs.LOCATION].lower()
if "baseclass=illinkresourcehandlergui" in new_url and "cmd=infoscreen" in new_url:
return ""
return None
auth_id = await self._current_auth_id()
target = await impl()
if target is not None:
return target
await self.authenticate(auth_id)
target = await impl()
if target is not None:
return target
raise CrawlError("resolve_link_target failed even after authenticating")
async def _handle_opencast_video( async def _handle_opencast_video(
self, self,
@@ -624,7 +512,7 @@ instance's greatest bottleneck.
if self.prev_report: if self.prev_report:
self.report.add_custom_value( self.report.add_custom_value(
_get_video_cache_key(element), _get_video_cache_key(element),
self.prev_report.get_custom_value(_get_video_cache_key(element)), self.prev_report.get_custom_value(_get_video_cache_key(element))
) )
# A video might contain other videos, so let's "crawl" the video first # A video might contain other videos, so let's "crawl" the video first
@@ -658,7 +546,7 @@ instance's greatest bottleneck.
def _previous_contained_opencast_videos( def _previous_contained_opencast_videos(
self, element: IliasPageElement, element_path: PurePath self, element: IliasPageElement, element_path: PurePath
) -> list[PurePath]: ) -> List[PurePath]:
if not self.prev_report: if not self.prev_report:
return [] return []
custom_value = self.prev_report.get_custom_value(_get_video_cache_key(element)) custom_value = self.prev_report.get_custom_value(_get_video_cache_key(element))
@@ -696,11 +584,11 @@ instance's greatest bottleneck.
def add_to_report(paths: list[str]) -> None: def add_to_report(paths: list[str]) -> None:
self.report.add_custom_value( self.report.add_custom_value(
_get_video_cache_key(element), _get_video_cache_key(element),
{"known_paths": paths, "own_path": str(self._transformer.transform(dl.path))}, {"known_paths": paths, "own_path": str(self._transformer.transform(dl.path))}
) )
async with dl as (bar, sink): async with dl as (bar, sink):
page = IliasPage(await self._get_page(element.url), element) page = IliasPage(await self._get_page(element.url), element.url, element)
stream_elements = page.get_child_elements() stream_elements = page.get_child_elements()
if len(stream_elements) > 1: if len(stream_elements) > 1:
@@ -710,11 +598,11 @@ instance's greatest bottleneck.
stream_element = stream_elements[0] stream_element = stream_elements[0]
# We do not have a local cache yet # We do not have a local cache yet
await self._stream_from_url(stream_element, sink, bar, is_video=True) await self._stream_from_url(stream_element.url, sink, bar, is_video=True)
add_to_report([str(self._transformer.transform(dl.path))]) add_to_report([str(self._transformer.transform(dl.path))])
return return
contained_video_paths: list[str] = [] contained_video_paths: List[str] = []
for stream_element in stream_elements: for stream_element in stream_elements:
video_path = dl.path.parent / stream_element.name video_path = dl.path.parent / stream_element.name
@@ -725,7 +613,7 @@ instance's greatest bottleneck.
async with maybe_dl as (bar, sink): async with maybe_dl as (bar, sink):
log.explain(f"Streaming video from real url {stream_element.url}") log.explain(f"Streaming video from real url {stream_element.url}")
contained_video_paths.append(str(self._transformer.transform(maybe_dl.path))) contained_video_paths.append(str(self._transformer.transform(maybe_dl.path)))
await self._stream_from_url(stream_element, sink, bar, is_video=True) await self._stream_from_url(stream_element.url, sink, bar, is_video=True)
add_to_report(contained_video_paths) add_to_report(contained_video_paths)
@@ -747,15 +635,12 @@ instance's greatest bottleneck.
async def _download_file(self, element: IliasPageElement, dl: DownloadToken, is_video: bool) -> None: async def _download_file(self, element: IliasPageElement, dl: DownloadToken, is_video: bool) -> None:
assert dl # The function is only reached when dl is not None assert dl # The function is only reached when dl is not None
async with dl as (bar, sink): async with dl as (bar, sink):
await self._stream_from_url(element, sink, bar, is_video) await self._stream_from_url(element.url, sink, bar, is_video)
async def _stream_from_url(
self, element: IliasPageElement, sink: FileSink, bar: ProgressBar, is_video: bool
) -> None:
url = element.url
async def _stream_from_url(self, url: str, sink: FileSink, bar: ProgressBar, is_video: bool) -> None:
async def try_stream() -> bool: async def try_stream() -> bool:
next_url = url next_url = url
# Normal files redirect to the magazine if we are not authenticated. As files could be HTML, # Normal files redirect to the magazine if we are not authenticated. As files could be HTML,
# we can not match on the content type here. Instead, we disallow redirects and inspect the # we can not match on the content type here. Instead, we disallow redirects and inspect the
# new location. If we are redirected anywhere but the ILIAS 8 "sendfile" command, we assume # new location. If we are redirected anywhere but the ILIAS 8 "sendfile" command, we assume
@@ -803,7 +688,7 @@ instance's greatest bottleneck.
await self.authenticate(auth_id) await self.authenticate(auth_id)
if not await try_stream(): if not await try_stream():
raise CrawlError(f"File streaming failed after authenticate() {element!r}") raise CrawlError("File streaming failed after authenticate()")
async def _handle_forum( async def _handle_forum(
self, self,
@@ -818,23 +703,36 @@ instance's greatest bottleneck.
@_iorepeat(3, "crawling forum") @_iorepeat(3, "crawling forum")
@anoncritical @anoncritical
async def _crawl_forum(self, element: IliasPageElement, cl: CrawlToken) -> None: async def _crawl_forum(self, element: IliasPageElement, cl: CrawlToken) -> None:
elements: List[IliasForumThread] = []
async with cl: async with cl:
inner = IliasPage(await self._get_page(element.url), element) next_stage_url = element.url
export_url = inner.get_forum_export_url() while next_stage_url:
if not export_url: log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
log.warn("Could not extract forum export url") log.explain(f"URL: {next_stage_url}")
soup = await self._get_page(next_stage_url)
page = IliasPage(soup, next_stage_url, element)
if next := page.get_next_stage_element():
next_stage_url = next.url
else:
break
download_data = page.get_download_forum_data()
if not download_data:
raise CrawlWarning("Failed to extract forum data")
if download_data.empty:
log.explain("Forum had no threads")
return return
html = await self._post_authenticated(download_data.url, download_data.form_data)
elements = parse_ilias_forum_export(soupify(html))
export = await self._post( elements.sort(key=lambda elem: elem.title)
export_url,
{"format": "html", "cmd[createExportFile]": ""},
)
elements = parse_ilias_forum_export(soupify(export)) tasks: List[Awaitable[None]] = []
for elem in elements:
tasks: list[Awaitable[None]] = [] tasks.append(asyncio.create_task(self._download_forum_thread(cl.path, elem)))
for thread in elements:
tasks.append(asyncio.create_task(self._download_forum_thread(cl.path, thread, element.url)))
# And execute them # And execute them
await self.gather(tasks) await self.gather(tasks)
@@ -842,18 +740,20 @@ instance's greatest bottleneck.
@anoncritical @anoncritical
@_iorepeat(3, "saving forum thread") @_iorepeat(3, "saving forum thread")
async def _download_forum_thread( async def _download_forum_thread(
self, parent_path: PurePath, thread: IliasForumThread | IliasPageElement, forum_url: str self,
parent_path: PurePath,
element: IliasForumThread,
) -> None: ) -> None:
path = parent_path / (sanitize_path_name(thread.name) + ".html") path = parent_path / (_sanitize_path_name(element.title) + ".html")
maybe_dl = await self.download(path, mtime=thread.mtime) maybe_dl = await self.download(path, mtime=element.mtime)
if not maybe_dl or not isinstance(thread, IliasForumThread): if not maybe_dl:
return return
async with maybe_dl as (bar, sink): async with maybe_dl as (bar, sink):
rendered = forum_thread_template( content = "<!DOCTYPE html>\n"
thread.name, forum_url, thread.name_tag, await self.internalize_images(thread.content_tag) content += element.title_tag.prettify()
) content += element.content_tag.prettify()
sink.file.write(rendered.encode("utf-8")) sink.file.write(content.encode("utf-8"))
sink.done() sink.done()
async def _handle_learning_module( async def _handle_learning_module(
@@ -871,33 +771,33 @@ instance's greatest bottleneck.
@_iorepeat(3, "crawling learning module") @_iorepeat(3, "crawling learning module")
@anoncritical @anoncritical
async def _crawl_learning_module(self, element: IliasPageElement, cl: CrawlToken) -> None: async def _crawl_learning_module(self, element: IliasPageElement, cl: CrawlToken) -> None:
elements: list[IliasLearningModulePage] = [] elements: List[IliasLearningModulePage] = []
async with cl: async with cl:
log.explain_topic(f"Parsing initial HTML page for {fmt_path(cl.path)}") log.explain_topic(f"Parsing initial HTML page for {fmt_path(cl.path)}")
log.explain(f"URL: {element.url}") log.explain(f"URL: {element.url}")
soup = await self._get_page(element.url) soup = await self._get_page(element.url)
page = IliasPage(soup, element) page = IliasPage(soup, element.url, element)
if next := page.get_learning_module_data(): if next := page.get_learning_module_data():
elements.extend( elements.extend(await self._crawl_learning_module_direction(
await self._crawl_learning_module_direction(cl.path, next.previous_url, "left", element) cl.path, next.previous_url, "left", element
) ))
elements.append(next) elements.append(next)
elements.extend( elements.extend(await self._crawl_learning_module_direction(
await self._crawl_learning_module_direction(cl.path, next.next_url, "right", element) cl.path, next.next_url, "right", element
) ))
# Reflect their natural ordering in the file names # Reflect their natural ordering in the file names
for index, lm_element in enumerate(elements): for index, lm_element in enumerate(elements):
lm_element.title = f"{index:02}_{lm_element.title}" lm_element.title = f"{index:02}_{lm_element.title}"
tasks: list[Awaitable[None]] = [] tasks: List[Awaitable[None]] = []
for index, elem in enumerate(elements): for index, elem in enumerate(elements):
prev_url = elements[index - 1].title if index > 0 else None prev_url = elements[index - 1].title if index > 0 else None
next_url = elements[index + 1].title if index < len(elements) - 1 else None next_url = elements[index + 1].title if index < len(elements) - 1 else None
tasks.append( tasks.append(asyncio.create_task(
asyncio.create_task(self._download_learning_module_page(cl.path, elem, prev_url, next_url)) self._download_learning_module_page(cl.path, elem, prev_url, next_url)
) ))
# And execute them # And execute them
await self.gather(tasks) await self.gather(tasks)
@@ -906,10 +806,10 @@ instance's greatest bottleneck.
self, self,
path: PurePath, path: PurePath,
start_url: Optional[str], start_url: Optional[str],
dir: Literal["left"] | Literal["right"], dir: Union[Literal["left"], Literal["right"]],
parent_element: IliasPageElement, parent_element: IliasPageElement
) -> list[IliasLearningModulePage]: ) -> List[IliasLearningModulePage]:
elements: list[IliasLearningModulePage] = [] elements: List[IliasLearningModulePage] = []
if not start_url: if not start_url:
return elements return elements
@@ -920,10 +820,13 @@ instance's greatest bottleneck.
log.explain_topic(f"Parsing HTML page for {fmt_path(path)} ({dir}-{counter})") log.explain_topic(f"Parsing HTML page for {fmt_path(path)} ({dir}-{counter})")
log.explain(f"URL: {next_element_url}") log.explain(f"URL: {next_element_url}")
soup = await self._get_page(next_element_url) soup = await self._get_page(next_element_url)
page = IliasPage(soup, parent_element) page = IliasPage(soup, next_element_url, parent_element)
if next := page.get_learning_module_data(): if next := page.get_learning_module_data():
elements.append(next) elements.append(next)
next_element_url = next.previous_url if dir == "left" else next.next_url if dir == "left":
next_element_url = next.previous_url
else:
next_element_url = next.next_url
counter += 1 counter += 1
return elements return elements
@@ -935,9 +838,9 @@ instance's greatest bottleneck.
parent_path: PurePath, parent_path: PurePath,
element: IliasLearningModulePage, element: IliasLearningModulePage,
prev: Optional[str], prev: Optional[str],
next: Optional[str], next: Optional[str]
) -> None: ) -> None:
path = parent_path / (sanitize_path_name(element.title) + ".html") path = parent_path / (_sanitize_path_name(element.title) + ".html")
maybe_dl = await self.download(path) maybe_dl = await self.download(path)
if not maybe_dl: if not maybe_dl:
return return
@@ -946,11 +849,17 @@ instance's greatest bottleneck.
return return
if prev: if prev:
prev_p = self._transformer.transform(parent_path / (sanitize_path_name(prev) + ".html")) prev_p = self._transformer.transform(parent_path / (_sanitize_path_name(prev) + ".html"))
prev = os.path.relpath(prev_p, my_path.parent) if prev_p else None if prev_p:
prev = os.path.relpath(prev_p, my_path.parent)
else:
prev = None
if next: if next:
next_p = self._transformer.transform(parent_path / (sanitize_path_name(next) + ".html")) next_p = self._transformer.transform(parent_path / (_sanitize_path_name(next) + ".html"))
next = os.path.relpath(next_p, my_path.parent) if next_p else None if next_p:
next = os.path.relpath(next_p, my_path.parent)
else:
next = None
async with maybe_dl as (bar, sink): async with maybe_dl as (bar, sink):
content = element.content content = element.content
@@ -964,16 +873,19 @@ instance's greatest bottleneck.
""" """
log.explain_topic("Internalizing images") log.explain_topic("Internalizing images")
for elem in tag.find_all(recursive=True): for elem in tag.find_all(recursive=True):
if elem.name == "img" and (src := elem.attrs.get("src", None)): if not isinstance(elem, Tag):
url = urljoin(self._base_url, cast(str, src)) continue
if not url.startswith(self._base_url): if elem.name == "img":
continue if src := elem.attrs.get("src", None):
log.explain(f"Internalizing {url!r}") url = urljoin(self._base_url, src)
img = await self._get_authenticated(url) if not url.startswith(self._base_url):
elem.attrs["src"] = "data:;base64," + base64.b64encode(img).decode() continue
if elem.name == "iframe" and cast(str, elem.attrs.get("src", "")).startswith("//"): log.explain(f"Internalizing {url!r}")
img = await self._get_authenticated(url)
elem.attrs["src"] = "data:;base64," + base64.b64encode(img).decode()
if elem.name == "iframe" and elem.attrs.get("src", "").startswith("//"):
# For unknown reasons the protocol seems to be stripped. # For unknown reasons the protocol seems to be stripped.
elem.attrs["src"] = "https:" + cast(str, elem.attrs["src"]) elem.attrs["src"] = "https:" + elem.attrs["src"]
return tag return tag
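The image internalization above boils down to inlining the fetched bytes as a base64 data URI so the saved HTML no longer depends on an authenticated ILIAS session; a tiny standalone sketch of that step (the empty media type mirrors the code above, browsers generally sniff the payload):

import base64

def to_data_uri(image_bytes: bytes, mime: str = "") -> str:
    # Produces e.g. "data:;base64,iVBORw0..." for direct use as an img src
    return f"data:{mime};base64," + base64.b64encode(image_bytes).decode()

# usage (hypothetical): img_tag.attrs["src"] = to_data_uri(await self._get_authenticated(url))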
def _ensure_not_seen(self, element: IliasPageElement, parent_path: PurePath) -> None: def _ensure_not_seen(self, element: IliasPageElement, parent_path: PurePath) -> None:
@@ -985,10 +897,10 @@ instance's greatest bottleneck.
) )
self._visited_urls[element.url] = parent_path self._visited_urls[element.url] = parent_path
async def _get_page(self, url: str, root_page_allowed: bool = False) -> IliasSoup: async def _get_page(self, url: str, root_page_allowed: bool = False) -> BeautifulSoup:
auth_id = await self._current_auth_id() auth_id = await self._current_auth_id()
async with self.session.get(url) as request: async with self.session.get(url) as request:
soup = IliasSoup(soupify(await request.read()), str(request.url)) soup = soupify(await request.read())
if IliasPage.is_logged_in(soup): if IliasPage.is_logged_in(soup):
return self._verify_page(soup, url, root_page_allowed) return self._verify_page(soup, url, root_page_allowed)
@@ -997,13 +909,13 @@ instance's greatest bottleneck.
# Retry once after authenticating. If this fails, we will die. # Retry once after authenticating. If this fails, we will die.
async with self.session.get(url) as request: async with self.session.get(url) as request:
soup = IliasSoup(soupify(await request.read()), str(request.url)) soup = soupify(await request.read())
if IliasPage.is_logged_in(soup): if IliasPage.is_logged_in(soup):
return self._verify_page(soup, url, root_page_allowed) return self._verify_page(soup, url, root_page_allowed)
raise CrawlError(f"get_page failed even after authenticating on {url!r}") raise CrawlError(f"get_page failed even after authenticating on {url!r}")
@staticmethod @staticmethod
def _verify_page(soup: IliasSoup, url: str, root_page_allowed: bool) -> IliasSoup: def _verify_page(soup: BeautifulSoup, url: str, root_page_allowed: bool) -> BeautifulSoup:
if IliasPage.is_root_page(soup) and not root_page_allowed: if IliasPage.is_root_page(soup) and not root_page_allowed:
raise CrawlError( raise CrawlError(
"Unexpectedly encountered ILIAS root page. " "Unexpectedly encountered ILIAS root page. "
@@ -1015,15 +927,29 @@ instance's greatest bottleneck.
) )
return soup return soup
async def _post(self, url: str, data: dict[str, str | list[str]]) -> bytes: async def _post_authenticated(
self,
url: str,
data: dict[str, Union[str, List[str]]]
) -> bytes:
auth_id = await self._current_auth_id()
form_data = aiohttp.FormData() form_data = aiohttp.FormData()
for key, val in data.items(): for key, val in data.items():
form_data.add_field(key, val) form_data.add_field(key, val)
async with self.session.post(url, data=form_data()) as request: async with self.session.post(url, data=form_data(), allow_redirects=False) as request:
if request.status == 200: if request.status == 200:
return await request.read() return await request.read()
raise CrawlError(f"post failed with status {request.status}")
# We weren't authenticated, so try to do that
await self.authenticate(auth_id)
# Retry once after authenticating. If this fails, we will die.
async with self.session.post(url, data=data, allow_redirects=False) as request:
if request.status == 200:
return await request.read()
raise CrawlError("post_authenticated failed even after authenticating")
async def _get_authenticated(self, url: str) -> bytes: async def _get_authenticated(self, url: str) -> bytes:
auth_id = await self._current_auth_id() auth_id = await self._current_auth_id()
@@ -1053,22 +979,52 @@ instance's greatest bottleneck.
async with self.session.get(urljoin(self._base_url, "/login.php"), params=params) as request: async with self.session.get(urljoin(self._base_url, "/login.php"), params=params) as request:
login_page = soupify(await request.read()) login_page = soupify(await request.read())
login_form = login_page.find("form", attrs={"name": "login_form"}) login_form = login_page.find("form", attrs={"name": "formlogin"})
if login_form is None: if login_form is None:
raise CrawlError("Could not find the login form! Specified client id might be invalid.") raise CrawlError("Could not find the login form! Specified client id might be invalid.")
login_url = cast(Optional[str], login_form.attrs.get("action")) login_url = login_form.attrs.get("action")
if login_url is None: if login_url is None:
raise CrawlError("Could not find the action URL in the login form!") raise CrawlError("Could not find the action URL in the login form!")
username, password = await self._auth.credentials() username, password = await self._auth.credentials()
login_form_data = aiohttp.FormData() login_data = {
login_form_data.add_field("login_form/input_3/input_4", username) "username": username,
login_form_data.add_field("login_form/input_3/input_5", password) "password": password,
"cmd[doStandardAuthentication]": "Login",
}
# do the actual login # do the actual login
async with self.session.post(urljoin(self._base_url, login_url), data=login_form_data) as request: async with self.session.post(urljoin(self._base_url, login_url), data=login_data) as request:
soup = IliasSoup(soupify(await request.read()), str(request.url)) soup = soupify(await request.read())
if not IliasPage.is_logged_in(soup): if not self._is_logged_in(soup):
self._auth.invalidate_credentials() self._auth.invalidate_credentials()
@staticmethod
def _is_logged_in(soup: BeautifulSoup) -> bool:
# Normal ILIAS pages
mainbar: Optional[Tag] = soup.find(class_="il-maincontrols-metabar")
if mainbar is not None:
login_button = mainbar.find(attrs={"href": lambda x: x and "login.php" in x})
shib_login = soup.find(id="button_shib_login")
return not login_button and not shib_login
# Personal Desktop
if soup.find("a", attrs={"href": lambda x: x and "block_type=pditems" in x}):
return True
# Video listing embeds do not have complete ILIAS html. Try to match them by
# their video listing table
video_table = soup.find(
recursive=True,
name="table",
attrs={"id": lambda x: x is not None and x.startswith("tbl_xoct")}
)
if video_table is not None:
return True
# The individual video player wrapper page has nothing of the above.
# Match it by its playerContainer.
if soup.select_one("#playerContainer") is not None:
return True
return False
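The removed `_is_logged_in` helper relies on BeautifulSoup's callable attribute filters. A tiny standalone illustration of that matching style (not PFERD code; the HTML is invented):

```python
from bs4 import BeautifulSoup

html = """
<div class="il-maincontrols-metabar">
  <a href="/logout.php">Logout</a>
</div>
"""
soup = BeautifulSoup(html, "html.parser")

metabar = soup.find(class_="il-maincontrols-metabar")
# A link to login.php anywhere in the metabar means we are *not* logged in.
login_button = metabar.find(attrs={"href": lambda x: x and "login.php" in x})
print("logged in:", login_button is None)  # -> logged in: True
```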

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
from typing import Literal from typing import Dict, Literal
from ...auth import Authenticator from ...auth import Authenticator
from ...config import Config from ...config import Config
@@ -26,7 +26,7 @@ class KitIliasWebCrawler(IliasWebCrawler):
name: str, name: str,
section: KitIliasWebCrawlerSection, section: KitIliasWebCrawlerSection,
config: Config, config: Config,
authenticators: dict[str, Authenticator], authenticators: Dict[str, Authenticator],
): ):
super().__init__(name, section, config, authenticators) super().__init__(name, section, config, authenticators)

View File

@@ -1,8 +1,8 @@
from typing import Any, Optional, cast from typing import Any, Optional
import aiohttp import aiohttp
import yarl import yarl
from bs4 import BeautifulSoup, Tag from bs4 import BeautifulSoup
from ...auth import Authenticator, TfaAuthenticator from ...auth import Authenticator, TfaAuthenticator
from ...logging import log from ...logging import log
@@ -38,7 +38,9 @@ class ShibbolethLogin:
async with sess.get(url) as response: async with sess.get(url) as response:
shib_url = response.url shib_url = response.url
if str(shib_url).startswith(self._ilias_url): if str(shib_url).startswith(self._ilias_url):
log.explain("ILIAS recognized our shib token and logged us in in the background, returning") log.explain(
"ILIAS recognized our shib token and logged us in in the background, returning"
)
return return
soup: BeautifulSoup = soupify(await response.read()) soup: BeautifulSoup = soupify(await response.read())
@@ -46,8 +48,8 @@ class ShibbolethLogin:
while not self._login_successful(soup): while not self._login_successful(soup):
# Searching the form here so that this fails before asking for # Searching the form here so that this fails before asking for
# credentials rather than after asking. # credentials rather than after asking.
form = cast(Tag, soup.find("form", {"method": "post"})) form = soup.find("form", {"method": "post"})
action = cast(str, form["action"]) action = form["action"]
# Equivalent: Enter credentials in # Equivalent: Enter credentials in
# https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO # https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO
@@ -57,10 +59,9 @@ class ShibbolethLogin:
"_eventId_proceed": "", "_eventId_proceed": "",
"j_username": username, "j_username": username,
"j_password": password, "j_password": password,
"fudis_web_authn_assertion_input": "",
} }
if csrf_token_input := form.find("input", {"name": "csrf_token"}): if csrf_token_input := form.find("input", {"name": "csrf_token"}):
data["csrf_token"] = csrf_token_input["value"] # type: ignore data["csrf_token"] = csrf_token_input["value"]
soup = await _post(sess, url, data) soup = await _post(sess, url, data)
if soup.find(id="attributeRelease"): if soup.find(id="attributeRelease"):
@@ -77,14 +78,14 @@ class ShibbolethLogin:
# Equivalent: Being redirected via JS automatically # Equivalent: Being redirected via JS automatically
# (or clicking "Continue" if you have JS disabled) # (or clicking "Continue" if you have JS disabled)
relay_state = cast(Tag, soup.find("input", {"name": "RelayState"})) relay_state = soup.find("input", {"name": "RelayState"})
saml_response = cast(Tag, soup.find("input", {"name": "SAMLResponse"})) saml_response = soup.find("input", {"name": "SAMLResponse"})
url = cast(str, cast(Tag, soup.find("form", {"method": "post"}))["action"]) url = form = soup.find("form", {"method": "post"})["action"]
data = { # using the info obtained in the while loop above data = { # using the info obtained in the while loop above
"RelayState": cast(str, relay_state["value"]), "RelayState": relay_state["value"],
"SAMLResponse": cast(str, saml_response["value"]), "SAMLResponse": saml_response["value"],
} }
await sess.post(cast(str, url), data=data) await sess.post(url, data=data)
async def _authenticate_tfa( async def _authenticate_tfa(
self, session: aiohttp.ClientSession, soup: BeautifulSoup, shib_url: yarl.URL self, session: aiohttp.ClientSession, soup: BeautifulSoup, shib_url: yarl.URL
@@ -96,8 +97,8 @@ class ShibbolethLogin:
# Searching the form here so that this fails before asking for # Searching the form here so that this fails before asking for
# credentials rather than after asking. # credentials rather than after asking.
form = cast(Tag, soup.find("form", {"method": "post"})) form = soup.find("form", {"method": "post"})
action = cast(str, form["action"]) action = form["action"]
# Equivalent: Enter token in # Equivalent: Enter token in
# https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO # https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO
@@ -105,10 +106,10 @@ class ShibbolethLogin:
username, password = await self._auth.credentials() username, password = await self._auth.credentials()
data = { data = {
"_eventId_proceed": "", "_eventId_proceed": "",
"fudis_otp_input": tfa_token, "j_tokenNumber": tfa_token,
} }
if csrf_token_input := form.find("input", {"name": "csrf_token"}): if csrf_token_input := form.find("input", {"name": "csrf_token"}):
data["csrf_token"] = csrf_token_input["value"] # type: ignore data["csrf_token"] = csrf_token_input["value"]
return await _post(session, url, data) return await _post(session, url, data)
@staticmethod @staticmethod
@@ -119,7 +120,7 @@ class ShibbolethLogin:
@staticmethod @staticmethod
def _tfa_required(soup: BeautifulSoup) -> bool: def _tfa_required(soup: BeautifulSoup) -> bool:
return soup.find(id="fudiscr-form") is not None return soup.find(id="j_tokenNumber") is not None
async def _post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup: async def _post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
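The Shibboleth hunks repeatedly echo an optional hidden `csrf_token` field back to the IdP before posting. A small sketch of that pattern, assuming `form` is the already-located `<form method="post">` tag; names are illustrative:

```python
from typing import Any

import aiohttp
from bs4 import BeautifulSoup, Tag


def build_idp_form_data(form: Tag, fields: dict[str, str]) -> dict[str, str]:
    data = dict(fields)
    # Newer IdP pages include a hidden csrf_token input that must be sent back.
    if csrf_token_input := form.find("input", {"name": "csrf_token"}):
        data["csrf_token"] = str(csrf_token_input["value"])
    return data


async def post_and_soupify(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
    async with session.post(url, data=data) as response:
        return BeautifulSoup(await response.read(), "html.parser")
```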

View File

@@ -1,21 +1,17 @@
import os import os
import re import re
from collections.abc import Awaitable, Generator, Iterable
from dataclasses import dataclass from dataclasses import dataclass
from datetime import datetime from datetime import datetime
from pathlib import PurePath from pathlib import PurePath
from re import Pattern from typing import Any, Awaitable, Generator, Iterable, List, Optional, Pattern, Tuple, Union
from typing import Any, Optional, Union, cast
from urllib.parse import urljoin from urllib.parse import urljoin
import aiohttp
from bs4 import BeautifulSoup, Tag from bs4 import BeautifulSoup, Tag
from ..auth import Authenticator
from ..config import Config from ..config import Config
from ..logging import ProgressBar, log from ..logging import ProgressBar, log
from ..output_dir import FileSink from ..output_dir import FileSink
from ..utils import sanitize_path_name, soupify from ..utils import soupify
from .crawler import CrawlError from .crawler import CrawlError
from .http_crawler import HttpCrawler, HttpCrawlerSection from .http_crawler import HttpCrawler, HttpCrawlerSection
@@ -35,15 +31,6 @@ class KitIpdCrawlerSection(HttpCrawlerSection):
regex = self.s.get("link_regex", r"^.*?[^/]+\.(pdf|zip|c|cpp|java)$") regex = self.s.get("link_regex", r"^.*?[^/]+\.(pdf|zip|c|cpp|java)$")
return re.compile(regex) return re.compile(regex)
def basic_auth(self, authenticators: dict[str, Authenticator]) -> Optional[Authenticator]:
value: Optional[str] = self.s.get("auth")
if value is None:
return None
auth = authenticators.get(value)
if auth is None:
self.invalid_value("auth", value, "No such auth section exists")
return auth
@dataclass @dataclass
class KitIpdFile: class KitIpdFile:
@@ -57,7 +44,7 @@ class KitIpdFile:
@dataclass @dataclass
class KitIpdFolder: class KitIpdFolder:
name: str name: str
entries: list[Union[KitIpdFile, "KitIpdFolder"]] entries: List[Union[KitIpdFile, "KitIpdFolder"]]
def explain(self) -> None: def explain(self) -> None:
log.explain_topic(f"Folder {self.name!r}") log.explain_topic(f"Folder {self.name!r}")
@@ -66,29 +53,23 @@ class KitIpdFolder:
class KitIpdCrawler(HttpCrawler): class KitIpdCrawler(HttpCrawler):
def __init__( def __init__(
self, self,
name: str, name: str,
section: KitIpdCrawlerSection, section: KitIpdCrawlerSection,
config: Config, config: Config,
authenticators: dict[str, Authenticator],
): ):
super().__init__(name, section, config) super().__init__(name, section, config)
self._url = section.target() self._url = section.target()
self._file_regex = section.link_regex() self._file_regex = section.link_regex()
self._authenticator = section.basic_auth(authenticators)
self._basic_auth: Optional[aiohttp.BasicAuth] = None
async def _run(self) -> None: async def _run(self) -> None:
if self._authenticator:
username, password = await self._authenticator.credentials()
self._basic_auth = aiohttp.BasicAuth(username, password)
maybe_cl = await self.crawl(PurePath(".")) maybe_cl = await self.crawl(PurePath("."))
if not maybe_cl: if not maybe_cl:
return return
tasks: list[Awaitable[None]] = [] tasks: List[Awaitable[None]] = []
async with maybe_cl: async with maybe_cl:
for item in await self._fetch_items(): for item in await self._fetch_items():
@@ -106,7 +87,7 @@ class KitIpdCrawler(HttpCrawler):
await self.gather(tasks) await self.gather(tasks)
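The kit-ipd changes above wire an optional `auth` section into HTTP basic auth. A hedged sketch of that wiring; the `Authenticator` protocol here only mirrors the `credentials()` call visible in the hunk:

```python
from typing import Optional, Protocol

import aiohttp


class Authenticator(Protocol):
    async def credentials(self) -> tuple[str, str]: ...


async def make_basic_auth(authenticator: Optional[Authenticator]) -> Optional[aiohttp.BasicAuth]:
    if authenticator is None:
        return None
    username, password = await authenticator.credentials()
    return aiohttp.BasicAuth(username, password)


# Later, every request simply passes it along (aiohttp accepts auth=None):
#   async with session.get(url, auth=basic_auth) as resp: ...
```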
async def _crawl_folder(self, parent: PurePath, folder: KitIpdFolder) -> None: async def _crawl_folder(self, parent: PurePath, folder: KitIpdFolder) -> None:
path = parent / sanitize_path_name(folder.name) path = parent / folder.name
if not await self.crawl(path): if not await self.crawl(path):
return return
@@ -123,9 +104,13 @@ class KitIpdCrawler(HttpCrawler):
await self.gather(tasks) await self.gather(tasks)
async def _download_file( async def _download_file(
self, parent: PurePath, file: KitIpdFile, etag: Optional[str], mtime: Optional[datetime] self,
parent: PurePath,
file: KitIpdFile,
etag: Optional[str],
mtime: Optional[datetime]
) -> None: ) -> None:
element_path = parent / sanitize_path_name(file.name) element_path = parent / file.name
prev_etag = self._get_previous_etag_from_report(element_path) prev_etag = self._get_previous_etag_from_report(element_path)
etag_differs = None if prev_etag is None else prev_etag != etag etag_differs = None if prev_etag is None else prev_etag != etag
@@ -140,9 +125,9 @@ class KitIpdCrawler(HttpCrawler):
async with maybe_dl as (bar, sink): async with maybe_dl as (bar, sink):
await self._stream_from_url(file.url, element_path, sink, bar) await self._stream_from_url(file.url, element_path, sink, bar)
async def _fetch_items(self) -> Iterable[KitIpdFile | KitIpdFolder]: async def _fetch_items(self) -> Iterable[Union[KitIpdFile, KitIpdFolder]]:
page, url = await self.get_page() page, url = await self.get_page()
elements: list[Tag] = self._find_file_links(page) elements: List[Tag] = self._find_file_links(page)
# do not add unnecessary nesting for a single <h1> heading # do not add unnecessary nesting for a single <h1> heading
drop_h1: bool = len(page.find_all(name="h1")) <= 1 drop_h1: bool = len(page.find_all(name="h1")) <= 1
@@ -171,21 +156,16 @@ class KitIpdCrawler(HttpCrawler):
name = os.path.basename(url) name = os.path.basename(url)
return KitIpdFile(name, url) return KitIpdFile(name, url)
def _find_file_links(self, tag: Tag | BeautifulSoup) -> list[Tag]: def _find_file_links(self, tag: Union[Tag, BeautifulSoup]) -> List[Tag]:
return cast(list[Tag], tag.find_all(name="a", attrs={"href": self._file_regex})) return tag.findAll(name="a", attrs={"href": self._file_regex})
def _abs_url_from_link(self, url: str, link_tag: Tag) -> str: def _abs_url_from_link(self, url: str, link_tag: Tag) -> str:
return urljoin(url, cast(str, link_tag.get("href"))) return urljoin(url, link_tag.get("href"))
async def _stream_from_url(self, url: str, path: PurePath, sink: FileSink, bar: ProgressBar) -> None: async def _stream_from_url(self, url: str, path: PurePath, sink: FileSink, bar: ProgressBar) -> None:
async with self.session.get(url, allow_redirects=False, auth=self._basic_auth) as resp: async with self.session.get(url, allow_redirects=False) as resp:
if resp.status == 403: if resp.status == 403:
raise CrawlError("Received a 403. Are you within the KIT network/VPN?") raise CrawlError("Received a 403. Are you within the KIT network/VPN?")
if resp.status == 401:
raise CrawlError("Received a 401. Do you maybe need credentials?")
if resp.status >= 400:
raise CrawlError(f"Received HTTP {resp.status} when trying to download {url!r}")
if resp.content_length: if resp.content_length:
bar.set_total(resp.content_length) bar.set_total(resp.content_length)
@@ -197,8 +177,8 @@ class KitIpdCrawler(HttpCrawler):
self._add_etag_to_report(path, resp.headers.get("ETag")) self._add_etag_to_report(path, resp.headers.get("ETag"))
async def get_page(self) -> tuple[BeautifulSoup, str]: async def get_page(self) -> Tuple[BeautifulSoup, str]:
async with self.session.get(self._url, auth=self._basic_auth) as request: async with self.session.get(self._url) as request:
# The web page for Algorithmen für Routenplanung contains some # The web page for Algorithmen für Routenplanung contains some
# weird comments that beautifulsoup doesn't parse correctly. This # weird comments that beautifulsoup doesn't parse correctly. This
# hack enables those pages to be crawled, and should hopefully not # hack enables those pages to be crawled, and should hopefully not
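The download path above gains explicit status handling (403 for VPN issues, 401 for missing credentials, and a catch-all for other errors) before streaming the body. A self-contained sketch of that loop; the progress callback and the plain file object stand in for PFERD's ProgressBar and FileSink:

```python
from typing import BinaryIO, Callable, Optional

import aiohttp


class CrawlError(Exception):
    pass


async def stream_to_file(
    session: aiohttp.ClientSession,
    url: str,
    file: BinaryIO,
    auth: Optional[aiohttp.BasicAuth] = None,
    on_progress: Callable[[int], None] = lambda n: None,
) -> Optional[str]:
    async with session.get(url, allow_redirects=False, auth=auth) as resp:
        if resp.status == 403:
            raise CrawlError("Received a 403. Are you within the KIT network/VPN?")
        if resp.status == 401:
            raise CrawlError("Received a 401. Do you maybe need credentials?")
        if resp.status >= 400:
            raise CrawlError(f"Received HTTP {resp.status} when trying to download {url!r}")

        async for chunk in resp.content.iter_chunked(64 * 1024):
            file.write(chunk)
            on_progress(len(chunk))

        # The ETag is stored in the report so unchanged files can be skipped next run.
        return resp.headers.get("ETag")
```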

View File

@@ -18,28 +18,31 @@ class LocalCrawlerSection(CrawlerSection):
def crawl_delay(self) -> float: def crawl_delay(self) -> float:
value = self.s.getfloat("crawl_delay", fallback=0.0) value = self.s.getfloat("crawl_delay", fallback=0.0)
if value < 0: if value < 0:
self.invalid_value("crawl_delay", value, "Must not be negative") self.invalid_value("crawl_delay", value,
"Must not be negative")
return value return value
def download_delay(self) -> float: def download_delay(self) -> float:
value = self.s.getfloat("download_delay", fallback=0.0) value = self.s.getfloat("download_delay", fallback=0.0)
if value < 0: if value < 0:
self.invalid_value("download_delay", value, "Must not be negative") self.invalid_value("download_delay", value,
"Must not be negative")
return value return value
def download_speed(self) -> Optional[int]: def download_speed(self) -> Optional[int]:
value = self.s.getint("download_speed") value = self.s.getint("download_speed")
if value is not None and value <= 0: if value is not None and value <= 0:
self.invalid_value("download_speed", value, "Must be greater than 0") self.invalid_value("download_speed", value,
"Must be greater than 0")
return value return value
class LocalCrawler(Crawler): class LocalCrawler(Crawler):
def __init__( def __init__(
self, self,
name: str, name: str,
section: LocalCrawlerSection, section: LocalCrawlerSection,
config: Config, config: Config,
): ):
super().__init__(name, section, config) super().__init__(name, section, config)
@@ -71,12 +74,10 @@ class LocalCrawler(Crawler):
tasks = [] tasks = []
async with cl: async with cl:
await asyncio.sleep( await asyncio.sleep(random.uniform(
random.uniform( 0.5 * self._crawl_delay,
0.5 * self._crawl_delay, self._crawl_delay,
self._crawl_delay, ))
)
)
for child in path.iterdir(): for child in path.iterdir():
pure_child = cl.path / child.name pure_child = cl.path / child.name
@@ -92,12 +93,10 @@ class LocalCrawler(Crawler):
return return
async with dl as (bar, sink): async with dl as (bar, sink):
await asyncio.sleep( await asyncio.sleep(random.uniform(
random.uniform( 0.5 * self._download_delay,
0.5 * self._download_delay, self._download_delay,
self._download_delay, ))
)
)
bar.set_total(stat.st_size) bar.set_total(stat.st_size)
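Both delays above are jittered rather than fixed; each wait is drawn uniformly between half the configured delay and the full delay. Equivalent sketch:

```python
import asyncio
import random


async def jittered_sleep(delay: float) -> None:
    # Sleep somewhere in [0.5 * delay, delay] to avoid perfectly regular request timing.
    await asyncio.sleep(random.uniform(0.5 * delay, delay))
```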

View File

@@ -1,5 +1,5 @@
from collections.abc import Iterator
from pathlib import PurePath from pathlib import PurePath
from typing import Iterator, Set
from .logging import log from .logging import log
from .utils import fmt_path from .utils import fmt_path
@@ -16,34 +16,15 @@ def name_variants(path: PurePath) -> Iterator[PurePath]:
class Deduplicator: class Deduplicator:
FORBIDDEN_CHARS = '<>:"/\\|?*' + "".join([chr(i) for i in range(0, 32)]) FORBIDDEN_CHARS = '<>:"/\\|?*' + "".join([chr(i) for i in range(0, 32)])
FORBIDDEN_NAMES = { FORBIDDEN_NAMES = {
"CON", "CON", "PRN", "AUX", "NUL",
"PRN", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"AUX", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
} }
def __init__(self, windows_paths: bool) -> None: def __init__(self, windows_paths: bool) -> None:
self._windows_paths = windows_paths self._windows_paths = windows_paths
self._known: set[PurePath] = set() self._known: Set[PurePath] = set()
def _add(self, path: PurePath) -> None: def _add(self, path: PurePath) -> None:
self._known.add(path) self._known.add(path)
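The `FORBIDDEN_NAMES` set exists because Windows reserves these device names regardless of extension. A standalone check in the same spirit (not the deduplicator's exact logic):

```python
FORBIDDEN_NAMES = {
    "CON", "PRN", "AUX", "NUL",
    *{f"COM{i}" for i in range(1, 10)},
    *{f"LPT{i}" for i in range(1, 10)},
}


def is_forbidden_on_windows(file_name: str) -> bool:
    # "CON.txt" is just as invalid as "CON", so only the part before the first dot counts.
    stem = file_name.split(".")[0]
    return stem.upper() in FORBIDDEN_NAMES
```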

View File

@@ -1,9 +1,8 @@
import asyncio import asyncio
import time import time
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional from typing import AsyncIterator, Optional
@dataclass @dataclass
@@ -13,7 +12,12 @@ class Slot:
class Limiter: class Limiter:
def __init__(self, task_limit: int, download_limit: int, task_delay: float): def __init__(
self,
task_limit: int,
download_limit: int,
task_delay: float
):
if task_limit <= 0: if task_limit <= 0:
raise ValueError("task limit must be at least 1") raise ValueError("task limit must be at least 1")
if download_limit <= 0: if download_limit <= 0:

View File

@@ -1,23 +1,16 @@
import asyncio import asyncio
import sys import sys
import traceback import traceback
from collections.abc import AsyncIterator, Iterator from contextlib import asynccontextmanager, contextmanager
from contextlib import AbstractContextManager, asynccontextmanager, contextmanager # TODO In Python 3.9 and above, ContextManager is deprecated
from typing import Any, Optional from typing import AsyncIterator, ContextManager, Iterator, List, Optional
from rich.console import Console, Group from rich.console import Console, Group
from rich.live import Live from rich.live import Live
from rich.markup import escape from rich.markup import escape
from rich.panel import Panel from rich.panel import Panel
from rich.progress import ( from rich.progress import (BarColumn, DownloadColumn, Progress, TaskID, TextColumn, TimeRemainingColumn,
BarColumn, TransferSpeedColumn)
DownloadColumn,
Progress,
TaskID,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from rich.table import Column from rich.table import Column
@@ -61,7 +54,7 @@ class Log:
self._showing_progress = False self._showing_progress = False
self._progress_suspended = False self._progress_suspended = False
self._lock = asyncio.Lock() self._lock = asyncio.Lock()
self._lines: list[str] = [] self._lines: List[str] = []
# Whether different parts of the output are enabled or disabled # Whether different parts of the output are enabled or disabled
self.output_explain = False self.output_explain = False
@@ -122,7 +115,7 @@ class Log:
for line in self._lines: for line in self._lines:
self.print(line) self.print(line)
def print(self, text: Any) -> None: def print(self, text: str) -> None:
""" """
Print a normal message. Allows markup. Print a normal message. Allows markup.
""" """
@@ -184,14 +177,10 @@ class Log:
# Our print function doesn't take types other than strings, but the # Our print function doesn't take types other than strings, but the
# underlying rich.print function does. This call is a special case # underlying rich.print function does. This call is a special case
# anyways, and we're calling it internally, so this should be fine. # anyways, and we're calling it internally, so this should be fine.
self.print( self.print(Panel.fit("""
Panel.fit(
"""
Please copy your program output and send it to the PFERD maintainers, either Please copy your program output and send it to the PFERD maintainers, either
directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
""".strip() """.strip())) # type: ignore
)
)
def explain_topic(self, text: str) -> None: def explain_topic(self, text: str) -> None:
""" """
@@ -248,10 +237,10 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
@contextmanager @contextmanager
def _bar( def _bar(
self, self,
progress: Progress, progress: Progress,
description: str, description: str,
total: Optional[float], total: Optional[float],
) -> Iterator[ProgressBar]: ) -> Iterator[ProgressBar]:
if total is None: if total is None:
# Indeterminate progress bar # Indeterminate progress bar
@@ -267,12 +256,12 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
self._update_live() self._update_live()
def crawl_bar( def crawl_bar(
self, self,
style: str, style: str,
action: str, action: str,
text: str, text: str,
total: Optional[float] = None, total: Optional[float] = None,
) -> AbstractContextManager[ProgressBar]: ) -> ContextManager[ProgressBar]:
""" """
Allows markup in the "style" argument which will be applied to the Allows markup in the "style" argument which will be applied to the
"action" string. "action" string.
@@ -283,12 +272,12 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
return self._bar(self._crawl_progress, description, total) return self._bar(self._crawl_progress, description, total)
def download_bar( def download_bar(
self, self,
style: str, style: str,
action: str, action: str,
text: str, text: str,
total: Optional[float] = None, total: Optional[float] = None,
) -> AbstractContextManager[ProgressBar]: ) -> ContextManager[ProgressBar]:
""" """
Allows markup in the "style" argument which will be applied to the Allows markup in the "style" argument which will be applied to the
"action" string. "action" string.

View File

@@ -4,13 +4,12 @@ import os
import random import random
import shutil import shutil
import string import string
from collections.abc import Iterator from contextlib import contextmanager
from contextlib import contextmanager, suppress
from dataclasses import dataclass from dataclasses import dataclass
from datetime import datetime from datetime import datetime
from enum import Enum from enum import Enum
from pathlib import Path, PurePath from pathlib import Path, PurePath
from typing import BinaryIO, Optional from typing import BinaryIO, Iterator, Optional, Tuple
from .logging import log from .logging import log
from .report import Report, ReportLoadError from .report import Report, ReportLoadError
@@ -36,7 +35,8 @@ class Redownload(Enum):
try: try:
return Redownload(string) return Redownload(string)
except ValueError: except ValueError:
raise ValueError("must be one of 'never', 'never-smart', 'always', 'always-smart'") from None raise ValueError("must be one of 'never', 'never-smart',"
" 'always', 'always-smart'")
class OnConflict(Enum): class OnConflict(Enum):
@@ -51,10 +51,8 @@ class OnConflict(Enum):
try: try:
return OnConflict(string) return OnConflict(string)
except ValueError: except ValueError:
raise ValueError( raise ValueError("must be one of 'prompt', 'local-first',"
"must be one of 'prompt', 'local-first'," " 'remote-first', 'no-delete', 'no-delete-prompt-overwrite'")
" 'remote-first', 'no-delete', 'no-delete-prompt-overwrite'"
) from None
@dataclass @dataclass
@@ -98,13 +96,13 @@ class FileSinkToken(ReusableAsyncContextManager[FileSink]):
# download handed back to the OutputDirectory. # download handed back to the OutputDirectory.
def __init__( def __init__(
self, self,
output_dir: "OutputDirectory", output_dir: "OutputDirectory",
remote_path: PurePath, remote_path: PurePath,
path: PurePath, path: PurePath,
local_path: Path, local_path: Path,
heuristics: Heuristics, heuristics: Heuristics,
on_conflict: OnConflict, on_conflict: OnConflict,
): ):
super().__init__() super().__init__()
@@ -120,17 +118,15 @@ class FileSinkToken(ReusableAsyncContextManager[FileSink]):
sink = FileSink(file) sink = FileSink(file)
async def after_download() -> None: async def after_download() -> None:
await self._output_dir._after_download( await self._output_dir._after_download(DownloadInfo(
DownloadInfo( self._remote_path,
self._remote_path, self._path,
self._path, self._local_path,
self._local_path, tmp_path,
tmp_path, self._heuristics,
self._heuristics, self._on_conflict,
self._on_conflict, sink.is_done(),
sink.is_done(), ))
)
)
self._stack.push_async_callback(after_download) self._stack.push_async_callback(after_download)
self._stack.enter_context(file) self._stack.enter_context(file)
@@ -142,10 +138,10 @@ class OutputDirectory:
REPORT_FILE = PurePath(".report") REPORT_FILE = PurePath(".report")
def __init__( def __init__(
self, self,
root: Path, root: Path,
redownload: Redownload, redownload: Redownload,
on_conflict: OnConflict, on_conflict: OnConflict,
): ):
if os.name == "nt": if os.name == "nt":
# Windows limits the path length to 260 for some historical reason. # Windows limits the path length to 260 for some historical reason.
@@ -178,8 +174,8 @@ class OutputDirectory:
try: try:
self._root.mkdir(parents=True, exist_ok=True) self._root.mkdir(parents=True, exist_ok=True)
except OSError as e: except OSError:
raise OutputDirError("Failed to create base directory") from e raise OutputDirError("Failed to create base directory")
def register_reserved(self, path: PurePath) -> None: def register_reserved(self, path: PurePath) -> None:
self._report.mark_reserved(path) self._report.mark_reserved(path)
@@ -197,11 +193,11 @@ class OutputDirectory:
return self._root / path return self._root / path
def _should_download( def _should_download(
self, self,
local_path: Path, local_path: Path,
heuristics: Heuristics, heuristics: Heuristics,
redownload: Redownload, redownload: Redownload,
on_conflict: OnConflict, on_conflict: OnConflict,
) -> bool: ) -> bool:
if not local_path.exists(): if not local_path.exists():
log.explain("No corresponding file present locally") log.explain("No corresponding file present locally")
@@ -274,9 +270,9 @@ class OutputDirectory:
# files. # files.
async def _conflict_lfrf( async def _conflict_lfrf(
self, self,
on_conflict: OnConflict, on_conflict: OnConflict,
path: PurePath, path: PurePath,
) -> bool: ) -> bool:
if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}: if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
async with log.exclusive_output(): async with log.exclusive_output():
@@ -293,9 +289,9 @@ class OutputDirectory:
raise ValueError(f"{on_conflict!r} is not a valid conflict policy") raise ValueError(f"{on_conflict!r} is not a valid conflict policy")
async def _conflict_ldrf( async def _conflict_ldrf(
self, self,
on_conflict: OnConflict, on_conflict: OnConflict,
path: PurePath, path: PurePath,
) -> bool: ) -> bool:
if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}: if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
async with log.exclusive_output(): async with log.exclusive_output():
@@ -312,10 +308,10 @@ class OutputDirectory:
raise ValueError(f"{on_conflict!r} is not a valid conflict policy") raise ValueError(f"{on_conflict!r} is not a valid conflict policy")
async def _conflict_lfrd( async def _conflict_lfrd(
self, self,
on_conflict: OnConflict, on_conflict: OnConflict,
path: PurePath, path: PurePath,
parent: PurePath, parent: PurePath,
) -> bool: ) -> bool:
if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}: if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
async with log.exclusive_output(): async with log.exclusive_output():
@@ -332,9 +328,9 @@ class OutputDirectory:
raise ValueError(f"{on_conflict!r} is not a valid conflict policy") raise ValueError(f"{on_conflict!r} is not a valid conflict policy")
async def _conflict_delete_lf( async def _conflict_delete_lf(
self, self,
on_conflict: OnConflict, on_conflict: OnConflict,
path: PurePath, path: PurePath,
) -> bool: ) -> bool:
if on_conflict == OnConflict.PROMPT: if on_conflict == OnConflict.PROMPT:
async with log.exclusive_output(): async with log.exclusive_output():
@@ -357,9 +353,9 @@ class OutputDirectory:
return base.parent / name return base.parent / name
async def _create_tmp_file( async def _create_tmp_file(
self, self,
local_path: Path, local_path: Path,
) -> tuple[Path, BinaryIO]: ) -> Tuple[Path, BinaryIO]:
""" """
May raise an OutputDirError. May raise an OutputDirError.
""" """
@@ -375,31 +371,15 @@ class OutputDirectory:
raise OutputDirError("Failed to create temporary file") raise OutputDirError("Failed to create temporary file")
def should_try_download(
self,
path: PurePath,
*,
etag_differs: Optional[bool] = None,
mtime: Optional[datetime] = None,
redownload: Optional[Redownload] = None,
on_conflict: Optional[OnConflict] = None,
) -> bool:
heuristics = Heuristics(etag_differs, mtime)
redownload = self._redownload if redownload is None else redownload
on_conflict = self._on_conflict if on_conflict is None else on_conflict
local_path = self.resolve(path)
return self._should_download(local_path, heuristics, redownload, on_conflict)
async def download( async def download(
self, self,
remote_path: PurePath, remote_path: PurePath,
path: PurePath, path: PurePath,
*, *,
etag_differs: Optional[bool] = None, etag_differs: Optional[bool] = None,
mtime: Optional[datetime] = None, mtime: Optional[datetime] = None,
redownload: Optional[Redownload] = None, redownload: Optional[Redownload] = None,
on_conflict: Optional[OnConflict] = None, on_conflict: Optional[OnConflict] = None,
) -> Optional[FileSinkToken]: ) -> Optional[FileSinkToken]:
""" """
May throw an OutputDirError, a MarkDuplicateError or a May throw an OutputDirError, a MarkDuplicateError or a
@@ -510,8 +490,10 @@ class OutputDirectory:
await self._cleanup(child, pure_child) await self._cleanup(child, pure_child)
if delete_self: if delete_self:
with suppress(OSError): try:
path.rmdir() path.rmdir()
except OSError:
pass
async def _cleanup_file(self, path: Path, pure: PurePath) -> None: async def _cleanup_file(self, path: Path, pure: PurePath) -> None:
if self._report.is_marked(pure): if self._report.is_marked(pure):
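The cleanup hunk swaps a try/except-pass for `contextlib.suppress`; both versions ignore the `OSError` raised when the directory still has children. Minimal equivalent:

```python
from contextlib import suppress
from pathlib import Path


def remove_dir_if_empty(path: Path) -> None:
    with suppress(OSError):
        path.rmdir()  # rmdir raises OSError if the directory is not empty
```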

View File

@@ -1,5 +1,6 @@
from pathlib import Path, PurePath from pathlib import Path, PurePath
from typing import Optional from typing import Dict, List, Optional
from urllib.parse import quote
from rich.markup import escape from rich.markup import escape
@@ -15,7 +16,7 @@ class PferdLoadError(Exception):
class Pferd: class Pferd:
def __init__(self, config: Config, cli_crawlers: Optional[list[str]], cli_skips: Optional[list[str]]): def __init__(self, config: Config, cli_crawlers: Optional[List[str]], cli_skips: Optional[List[str]]):
""" """
May throw PferdLoadError. May throw PferdLoadError.
""" """
@@ -23,10 +24,10 @@ class Pferd:
self._config = config self._config = config
self._crawlers_to_run = self._find_crawlers_to_run(config, cli_crawlers, cli_skips) self._crawlers_to_run = self._find_crawlers_to_run(config, cli_crawlers, cli_skips)
self._authenticators: dict[str, Authenticator] = {} self._authenticators: Dict[str, Authenticator] = {}
self._crawlers: dict[str, Crawler] = {} self._crawlers: Dict[str, Crawler] = {}
def _find_config_crawlers(self, config: Config) -> list[str]: def _find_config_crawlers(self, config: Config) -> List[str]:
crawl_sections = [] crawl_sections = []
for name, section in config.crawl_sections(): for name, section in config.crawl_sections():
@@ -37,7 +38,7 @@ class Pferd:
return crawl_sections return crawl_sections
def _find_cli_crawlers(self, config: Config, cli_crawlers: list[str]) -> list[str]: def _find_cli_crawlers(self, config: Config, cli_crawlers: List[str]) -> List[str]:
if len(cli_crawlers) != len(set(cli_crawlers)): if len(cli_crawlers) != len(set(cli_crawlers)):
raise PferdLoadError("Some crawlers were selected multiple times") raise PferdLoadError("Some crawlers were selected multiple times")
@@ -66,14 +67,14 @@ class Pferd:
return crawlers_to_run return crawlers_to_run
def _find_crawlers_to_run( def _find_crawlers_to_run(
self, self,
config: Config, config: Config,
cli_crawlers: Optional[list[str]], cli_crawlers: Optional[List[str]],
cli_skips: Optional[list[str]], cli_skips: Optional[List[str]],
) -> list[str]: ) -> List[str]:
log.explain_topic("Deciding which crawlers to run") log.explain_topic("Deciding which crawlers to run")
crawlers: list[str] crawlers: List[str]
if cli_crawlers is None: if cli_crawlers is None:
log.explain("No crawlers specified on CLI") log.explain("No crawlers specified on CLI")
log.explain("Running crawlers specified in config") log.explain("Running crawlers specified in config")
@@ -104,7 +105,7 @@ class Pferd:
def _load_crawlers(self) -> None: def _load_crawlers(self) -> None:
# Cookie sharing # Cookie sharing
kit_ilias_web_paths: dict[Authenticator, list[Path]] = {} kit_ilias_web_paths: Dict[Authenticator, List[Path]] = {}
for name, section in self._config.crawl_sections(): for name, section in self._config.crawl_sections():
log.print(f"[bold bright_cyan]Loading[/] {escape(name)}") log.print(f"[bold bright_cyan]Loading[/] {escape(name)}")
@@ -117,8 +118,9 @@ class Pferd:
crawler = crawler_constructor(name, section, self._config, self._authenticators) crawler = crawler_constructor(name, section, self._config, self._authenticators)
self._crawlers[name] = crawler self._crawlers[name] = crawler
if self._config.default_section.share_cookies() and isinstance(crawler, KitIliasWebCrawler): if self._config.default_section.share_cookies():
crawler.share_cookies(kit_ilias_web_paths) if isinstance(crawler, KitIliasWebCrawler):
crawler.share_cookies(kit_ilias_web_paths)
def debug_transforms(self) -> None: def debug_transforms(self) -> None:
for name in self._crawlers_to_run: for name in self._crawlers_to_run:
@@ -160,17 +162,18 @@ class Pferd:
def print_report(self) -> None: def print_report(self) -> None:
for name in self._crawlers_to_run: for name in self._crawlers_to_run:
crawlerOpt = self._crawlers.get(name) crawler = self._crawlers.get(name)
if crawlerOpt is None: if crawler is None:
continue # Crawler failed to load continue # Crawler failed to load
crawler = crawlerOpt
log.report("") log.report("")
log.report(f"[bold bright_cyan]Report[/] for {escape(name)}") log.report(f"[bold bright_cyan]Report[/] for {escape(name)}")
def fmt_path_link(relative_path: PurePath) -> str: def fmt_path_link(relative_path: PurePath) -> str:
# We need to URL-encode the path because it might contain spaces or special characters # We need to URL-encode the path because it might contain spaces or special characters
link = crawler.output_dir.resolve(relative_path).absolute().as_uri() absolute_path = str(crawler.output_dir.resolve(relative_path).absolute())
absolute_path = absolute_path.replace("\\\\?\\", "")
link = f"file://{quote(absolute_path)}"
return f"[link={link}]{fmt_path(relative_path)}[/link]" return f"[link={link}]{fmt_path(relative_path)}[/link]"
something_changed = False something_changed = False
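This `fmt_path_link` hunk is what the commit message refers to: on Windows, `Path.absolute()` may return an extended-length path (`\\?\C:\...`), which makes for ugly or broken `file://` links in the report. One side strips that prefix and percent-encodes the path by hand, the other builds the link via `Path.as_uri()`. A side-by-side sketch of the two approaches:

```python
from pathlib import Path
from urllib.parse import quote


def link_via_as_uri(path: Path) -> str:
    # as_uri() percent-encodes and adds the file:// scheme in one step.
    return path.absolute().as_uri()


def link_via_manual_quote(path: Path) -> str:
    absolute_path = str(path.absolute())
    absolute_path = absolute_path.replace("\\\\?\\", "")  # drop the \\?\ long-path prefix
    return f"file://{quote(absolute_path)}"
```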

View File

@@ -1,6 +1,6 @@
import json import json
from pathlib import Path, PurePath from pathlib import Path, PurePath
from typing import Any, Optional from typing import Any, Dict, List, Optional, Set
class ReportLoadError(Exception): class ReportLoadError(Exception):
@@ -34,6 +34,15 @@ class MarkConflictError(Exception):
self.collides_with = collides_with self.collides_with = collides_with
# TODO Use PurePath.is_relative_to when updating to 3.9
def is_relative_to(a: PurePath, b: PurePath) -> bool:
try:
a.relative_to(b)
return True
except ValueError:
return False
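The helper above backports `PurePath.is_relative_to`, which is built in from Python 3.9 onwards and is called directly by the newer code. Quick usage check:

```python
from pathlib import PurePath

assert PurePath("a/b/c").is_relative_to(PurePath("a/b"))       # built-in since Python 3.9
assert not PurePath("a/b").is_relative_to(PurePath("a/b/c"))
```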
class Report: class Report:
""" """
A report of a synchronization. Includes all files found by the crawler, as A report of a synchronization. Includes all files found by the crawler, as
@@ -42,32 +51,32 @@ class Report:
def __init__(self) -> None: def __init__(self) -> None:
# Paths found by the crawler, untransformed # Paths found by the crawler, untransformed
self.found_paths: set[PurePath] = set() self.found_paths: Set[PurePath] = set()
# Files reserved for metadata files (e. g. the report file or cookies) # Files reserved for metadata files (e. g. the report file or cookies)
# that can't be overwritten by user transforms and won't be cleaned up # that can't be overwritten by user transforms and won't be cleaned up
# at the end. # at the end.
self.reserved_files: set[PurePath] = set() self.reserved_files: Set[PurePath] = set()
# Files found by the crawler, transformed. Only includes files that # Files found by the crawler, transformed. Only includes files that
# were downloaded (or a download was attempted) # were downloaded (or a download was attempted)
self.known_files: set[PurePath] = set() self.known_files: Set[PurePath] = set()
self.added_files: set[PurePath] = set() self.added_files: Set[PurePath] = set()
self.changed_files: set[PurePath] = set() self.changed_files: Set[PurePath] = set()
self.deleted_files: set[PurePath] = set() self.deleted_files: Set[PurePath] = set()
# Files that should have been deleted by the cleanup but weren't # Files that should have been deleted by the cleanup but weren't
self.not_deleted_files: set[PurePath] = set() self.not_deleted_files: Set[PurePath] = set()
# Custom crawler-specific data # Custom crawler-specific data
self.custom: dict[str, Any] = dict() self.custom: Dict[str, Any] = dict()
# Encountered errors and warnings # Encountered errors and warnings
self.encountered_warnings: list[str] = [] self.encountered_warnings: List[str] = []
self.encountered_errors: list[str] = [] self.encountered_errors: List[str] = []
@staticmethod @staticmethod
def _get_list_of_strs(data: dict[str, Any], key: str) -> list[str]: def _get_list_of_strs(data: Dict[str, Any], key: str) -> List[str]:
result: Any = data.get(key, []) result: Any = data.get(key, [])
if not isinstance(result, list): if not isinstance(result, list):
@@ -80,8 +89,8 @@ class Report:
return result return result
@staticmethod @staticmethod
def _get_str_dictionary(data: dict[str, Any], key: str) -> dict[str, Any]: def _get_str_dictionary(data: Dict[str, Any], key: str) -> Dict[str, Any]:
result: dict[str, Any] = data.get(key, {}) result: Dict[str, Any] = data.get(key, {})
if not isinstance(result, dict): if not isinstance(result, dict):
raise ReportLoadError(f"Incorrect format: {key!r} is not a dictionary") raise ReportLoadError(f"Incorrect format: {key!r} is not a dictionary")
@@ -164,13 +173,13 @@ class Report:
if path == other: if path == other:
raise MarkDuplicateError(path) raise MarkDuplicateError(path)
if path.is_relative_to(other) or other.is_relative_to(path): if is_relative_to(path, other) or is_relative_to(other, path):
raise MarkConflictError(path, other) raise MarkConflictError(path, other)
self.known_files.add(path) self.known_files.add(path)
@property @property
def marked(self) -> set[PurePath]: def marked(self) -> Set[PurePath]:
return self.known_files | self.reserved_files return self.known_files | self.reserved_files
def is_marked(self, path: PurePath) -> bool: def is_marked(self, path: PurePath) -> bool:

View File

@@ -1,12 +1,10 @@
import ast import ast
import contextlib
import re import re
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from dataclasses import dataclass from dataclasses import dataclass
from enum import Enum from enum import Enum
from pathlib import PurePath from pathlib import PurePath
from typing import Optional, TypeVar from typing import Callable, Dict, List, Optional, Sequence, TypeVar, Union
from .logging import log from .logging import log
from .utils import fmt_path, str_path from .utils import fmt_path, str_path
@@ -25,7 +23,7 @@ class Empty:
pass pass
RightSide = str | Ignore | Empty RightSide = Union[str, Ignore, Empty]
@dataclass @dataclass
@@ -37,7 +35,7 @@ class Ignored:
pass pass
TransformResult = Transformed | Ignored | None TransformResult = Optional[Union[Transformed, Ignored]]
@dataclass @dataclass
@@ -49,7 +47,7 @@ class Rule:
right: RightSide right: RightSide
right_index: int right_index: int
def right_result(self, path: PurePath) -> str | Transformed | Ignored: def right_result(self, path: PurePath) -> Union[str, Transformed, Ignored]:
if isinstance(self.right, str): if isinstance(self.right, str):
return self.right return self.right
elif isinstance(self.right, Ignore): elif isinstance(self.right, Ignore):
@@ -95,20 +93,24 @@ class ExactReTf(Transformation):
# since elements of "match.groups()" can be None, mypy is wrong. # since elements of "match.groups()" can be None, mypy is wrong.
groups: Sequence[Optional[str]] = [match[0]] + list(match.groups()) groups: Sequence[Optional[str]] = [match[0]] + list(match.groups())
locals_dir: dict[str, str | int | float] = {} locals_dir: Dict[str, Union[str, int, float]] = {}
for i, group in enumerate(groups): for i, group in enumerate(groups):
if group is None: if group is None:
continue continue
locals_dir[f"g{i}"] = group locals_dir[f"g{i}"] = group
with contextlib.suppress(ValueError): try:
locals_dir[f"i{i}"] = int(group) locals_dir[f"i{i}"] = int(group)
except ValueError:
pass
with contextlib.suppress(ValueError): try:
locals_dir[f"f{i}"] = float(group) locals_dir[f"f{i}"] = float(group)
except ValueError:
pass
named_groups: dict[str, str] = match.groupdict() named_groups: Dict[str, str] = match.groupdict()
for name, capture in named_groups.items(): for name, capture in named_groups.items():
locals_dir[name] = capture locals_dir[name] = capture
@@ -206,7 +208,7 @@ class Line:
@property @property
def rest(self) -> str: def rest(self) -> str:
return self.line[self.index :] return self.line[self.index:]
def peek(self, amount: int = 1) -> str: def peek(self, amount: int = 1) -> str:
return self.rest[:amount] return self.rest[:amount]
@@ -226,7 +228,7 @@ class Line:
self.expect(string) self.expect(string)
return value return value
def one_of(self, parsers: list[Callable[[], T]], description: str) -> T: def one_of(self, parsers: List[Callable[[], T]], description: str) -> T:
for parser in parsers: for parser in parsers:
index = self.index index = self.index
try: try:
@@ -313,7 +315,7 @@ def parse_left(line: Line) -> str:
return parse_str(line) return parse_str(line)
def parse_right(line: Line) -> str | Ignore: def parse_right(line: Line) -> Union[str, Ignore]:
c = line.peek() c = line.peek()
if c in QUOTATION_MARKS: if c in QUOTATION_MARKS:
return parse_quoted_str(line) return parse_quoted_str(line)
@@ -325,27 +327,21 @@ def parse_right(line: Line) -> str | Ignore:
def parse_arrow_name(line: Line) -> str: def parse_arrow_name(line: Line) -> str:
return line.one_of( return line.one_of([
[ lambda: line.expect("exact-re"),
lambda: line.expect("exact-re"), lambda: line.expect("exact"),
lambda: line.expect("exact"), lambda: line.expect("name-re"),
lambda: line.expect("name-re"), lambda: line.expect("name"),
lambda: line.expect("name"), lambda: line.expect("re"),
lambda: line.expect("re"), lambda: line.expect(""),
lambda: line.expect(""), ], "Expected arrow name")
],
"Expected arrow name",
)
def parse_arrow_head(line: Line) -> ArrowHead: def parse_arrow_head(line: Line) -> ArrowHead:
return line.one_of( return line.one_of([
[ lambda: line.expect_with(">>", ArrowHead.SEQUENCE),
lambda: line.expect_with(">>", ArrowHead.SEQUENCE), lambda: line.expect_with(">", ArrowHead.NORMAL),
lambda: line.expect_with(">", ArrowHead.NORMAL), ], "Expected arrow head")
],
"Expected arrow head",
)
def parse_eol(line: Line) -> None: def parse_eol(line: Line) -> None:
@@ -417,12 +413,12 @@ class Transformer:
def transform(self, path: PurePath) -> Optional[PurePath]: def transform(self, path: PurePath) -> Optional[PurePath]:
for i, (line, tf) in enumerate(self._tfs): for i, (line, tf) in enumerate(self._tfs):
log.explain(f"Testing rule {i + 1}: {line}") log.explain(f"Testing rule {i+1}: {line}")
try: try:
result = tf.transform(path) result = tf.transform(path)
except Exception as e: except Exception as e:
log.warn(f"Error while testing rule {i + 1}: {line}") log.warn(f"Error while testing rule {i+1}: {line}")
log.warn_contd(str(e)) log.warn_contd(str(e))
continue continue

View File

@@ -3,11 +3,10 @@ import getpass
import sys import sys
import threading import threading
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections.abc import Callable
from contextlib import AsyncExitStack from contextlib import AsyncExitStack
from pathlib import Path, PurePath from pathlib import Path, PurePath
from types import TracebackType from types import TracebackType
from typing import Any, Generic, Optional, TypeVar from typing import Any, Callable, Dict, Generic, Optional, Type, TypeVar
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
import bs4 import bs4
@@ -80,7 +79,7 @@ def url_set_query_param(url: str, param: str, value: str) -> str:
return urlunsplit((scheme, netloc, path, new_query_string, fragment)) return urlunsplit((scheme, netloc, path, new_query_string, fragment))
def url_set_query_params(url: str, params: dict[str, str]) -> str: def url_set_query_params(url: str, params: Dict[str, str]) -> str:
""" """
Sets multiple query parameters in an url, overwriting existing ones. Sets multiple query parameters in an url, overwriting existing ones.
""" """
@@ -106,10 +105,6 @@ def fmt_real_path(path: Path) -> str:
return repr(str(path.absolute())) return repr(str(path.absolute()))
def sanitize_path_name(name: str) -> str:
return name.replace("/", "-").replace("\\", "-").strip()
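For reference, the removed `sanitize_path_name` helper and the `url_set_query_params` signature above behave roughly as sketched below; the `url_set_query_params` body here is an illustrative reimplementation of its documented behaviour, not the project's exact code:

```python
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit


def url_set_query_params(url: str, params: dict[str, str]) -> str:
    """Set multiple query parameters in a URL, overwriting existing ones."""
    scheme, netloc, path, query, fragment = urlsplit(url)
    query_dict = parse_qs(query)
    query_dict.update({key: [value] for key, value in params.items()})
    return urlunsplit((scheme, netloc, path, urlencode(query_dict, doseq=True), fragment))


def sanitize_path_name(name: str) -> str:
    return name.replace("/", "-").replace("\\", "-").strip()


print(url_set_query_params("https://example.com/a?x=1", {"x": "2", "y": "3"}))
# -> https://example.com/a?x=2&y=3
print(sanitize_path_name(" Folder / With \\ Slashes "))
# -> Folder - With - Slashes
```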
class ReusableAsyncContextManager(ABC, Generic[T]): class ReusableAsyncContextManager(ABC, Generic[T]):
def __init__(self) -> None: def __init__(self) -> None:
self._active = False self._active = False
@@ -129,17 +124,17 @@ class ReusableAsyncContextManager(ABC, Generic[T]):
# See https://stackoverflow.com/a/13075071 # See https://stackoverflow.com/a/13075071
try: try:
result: T = await self._on_aenter() result: T = await self._on_aenter()
return result except: # noqa: E722 do not use bare 'except'
except:
if not await self.__aexit__(*sys.exc_info()): if not await self.__aexit__(*sys.exc_info()):
raise raise
raise
return result
async def __aexit__( async def __aexit__(
self, self,
exc_type: Optional[type[BaseException]], exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException], exc_value: Optional[BaseException],
traceback: Optional[TracebackType], traceback: Optional[TracebackType],
) -> Optional[bool]: ) -> Optional[bool]:
if not self._active: if not self._active:
raise RuntimeError("__aexit__ called too many times") raise RuntimeError("__aexit__ called too many times")

View File

@@ -1,2 +1,2 @@
NAME = "PFERD" NAME = "PFERD"
VERSION = "3.8.3" VERSION = "3.7.0"

View File

@@ -17,7 +17,7 @@ Binaries for Linux, Windows and Mac can be downloaded directly from the
### With pip ### With pip
Ensure you have at least Python 3.11 installed. Run the following command to Ensure you have at least Python 3.9 installed. Run the following command to
install PFERD or upgrade it to the latest version: install PFERD or upgrade it to the latest version:
``` ```

flake.lock (generated, 8 changed lines)
View File

@@ -2,16 +2,16 @@
"nodes": { "nodes": {
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1760725957, "lastModified": 1708979614,
"narHash": "sha256-tdoIhL/NlER290HfSjOkgi4jfmjeqmqrzgnmiMtGepE=", "narHash": "sha256-FWLWmYojIg6TeqxSnHkKpHu5SGnFP5um1uUjH+wRV6g=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "81b927b14b7b3988334d5282ef9cba802e193fe1", "rev": "b7ee09cf5614b02d289cd86fcfa6f24d4e078c2a",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "nixos-25.05", "ref": "nixos-23.11",
"repo": "nixpkgs", "repo": "nixpkgs",
"type": "github" "type": "github"
} }

View File

@@ -2,7 +2,7 @@
description = "Tool for downloading course-related files from ILIAS"; description = "Tool for downloading course-related files from ILIAS";
inputs = { inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05"; nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
}; };
outputs = { self, nixpkgs }: outputs = { self, nixpkgs }:

View File

@@ -12,7 +12,7 @@ dependencies = [
"certifi>=2021.10.8" "certifi>=2021.10.8"
] ]
dynamic = ["version"] dynamic = ["version"]
requires-python = ">=3.11" requires-python = ">=3.9"
[project.scripts] [project.scripts]
pferd = "PFERD.__main__:main" pferd = "PFERD.__main__:main"
@@ -20,33 +20,23 @@ pferd = "PFERD.__main__:main"
[tool.setuptools.dynamic] [tool.setuptools.dynamic]
version = {attr = "PFERD.version.VERSION"} version = {attr = "PFERD.version.VERSION"}
[tool.ruff] [tool.flake8]
line-length = 110 max-line-length = 110
[tool.ruff.lint] [tool.isort]
select = [ line_length = 110
# pycodestyle
"E",
# Pyflakes
"F",
# pyupgrade
"UP",
# flake8-bugbear
"B",
# flake8-simplify
"SIM",
# isort
"I",
]
ignore = [
"UP045",
"SIM114",
"B023"
]
[dependency-groups] [tool.autopep8]
dev = [ max_line_length = 110
"pyinstaller>=6.16.0", in-place = true
"pyright>=1.1.406", recursive = true
"ruff>=0.14.1",
] [tool.mypy]
disallow_any_generics = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
no_implicit_optional = true
warn_unused_ignores = true
warn_unreachable = true
show_error_context = true
ignore_missing_imports = true

View File

@@ -2,4 +2,4 @@
set -e set -e
uv run pyinstaller --onefile pferd.py pyinstaller --onefile pferd.py

View File

@@ -2,5 +2,5 @@
set -e set -e
uv run pyright . mypy .
uv run ruff check flake8 PFERD

View File

@@ -2,4 +2,5 @@
set -e set -e
uv run ruff format autopep8 .
isort .

uv.lock (generated, 1056 changed lines)

File diff suppressed because it is too large