Mirror of https://github.com/Garmelon/PFERD.git, synced 2025-07-12 22:22:30 +02:00

Compare commits (86 commits)
Commit SHA1s:

7291382430
1a430ad5d1
f6bdeb6b9d
63f25277b0
c8eff04ae0
edc482cdf4
72cd0f77e2
be175f9347
ba2833dba5
2f0e792670
5f88539f7e
bd9d7efe64
16a2dd5b15
678283d341
287173b0b1
712217e959
6dda4c55a8
596b6a7688
5983200247
26e802d88b
f5c4e82816
f5273f7ca0
fa71a9f44f
81d6ff53c4
d7a2b6e019
71c65e89d1
c1046498e7
8fbd1978af
739dd95850
c54c3bcfa1
d7f2229978
52fdeae752
f9bb2e41cf
4f9e2ab48d
19beb8f07b
c897d9e2f5
21a266e302
b29b6f93f8
318226d7cb
422cf05f15
819c6673c7
89b44c69a7
4b4f72b2ca
778517d8c6
428b0179fc
ade6309dd9
fd6cb7b966
5c87517ceb
b01f093474
3a05b90525
7a00f73e0e
5d0621420e
df98153169
fc1f68ccd9
3e831c7e23
bbcfe9c8dd
eb01aa86cb
3db186a978
4a5959fd58
1cbc2b717a
da627ff929
c1b592ac29
eb0c956d32
ab0cb2d956
a117126389
e9f8901520
266812f90e
533bc27439
0113a0ca10
40f8a05ad6
50b50513c6
df3514cd03
ad53185247
87b67e9271
b54b3b979c
2184ac8040
b3d412360b
dbc2553b11
68c398f1fe
123a57beec
d204dac8ce
443f7fe839
0294ceb7d5
6f30c6583d
467fc526e8
722d2eb393
.github/dependabot.yml (new file, vendored, 10 lines)
@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: github-actions
+    directory: /
+    schedule:
+      interval: monthly
+    groups:
+      gh-actions:
+        patterns:
+          - "*"
.github/workflows/build-and-release.yml (vendored, 31 lines changed)
@@ -1,6 +1,6 @@
 name: build-and-release
 
-on: push
+on: [push, pull_request]
 
 defaults:
   run:
@@ -13,13 +13,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
-        python: ["3.9"]
+        os: [ubuntu-latest, windows-latest, macos-13, macos-latest]
+        python: ["3.11"]
     steps:
-
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
 
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python }}
 
@@ -34,7 +33,12 @@ jobs:
         run: ./scripts/setup --no-pip
 
       - name: Run checks
-        run: ./scripts/check
+        run: |
+          ./scripts/check
+          ./scripts/format
+
+      - name: Assert no changes
+        run: git diff --exit-code
 
       - name: Build
         run: ./scripts/build
@@ -45,9 +49,9 @@ jobs:
         run: mv dist/pferd* dist/pferd-${{ matrix.os }}
 
       - name: Upload binary
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
-          name: Binaries
+          name: pferd-${{ matrix.os }}
           path: dist/pferd-${{ matrix.os }}
 
   release:
@@ -57,18 +61,20 @@ jobs:
     steps:
 
       - name: Download binaries
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v4
        with:
-          name: Binaries
+          pattern: pferd-*
+          merge-multiple: true
 
       - name: Rename binaries
        run: |
          mv pferd-ubuntu-latest pferd-linux
          mv pferd-windows-latest pferd-windows.exe
+          mv pferd-macos-13 pferd-mac-x86_64
          mv pferd-macos-latest pferd-mac
 
      - name: Create release
-        uses: softprops/action-gh-release@v1
+        uses: softprops/action-gh-release@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
@@ -76,3 +82,4 @@ jobs:
             pferd-linux
             pferd-windows.exe
             pferd-mac
+            pferd-mac-x86_64
.gitignore (vendored, 1 line changed)
@@ -3,6 +3,7 @@
 /PFERD.egg-info/
 __pycache__/
 /.vscode/
+/.idea/
 
 # pyinstaller
 /pferd.spec
CHANGELOG.md (85 lines changed)
@@ -22,6 +22,91 @@ ambiguous situations.
 
 ## Unreleased
 
+## 3.8.0 - 2025-04-15
+
+### Added
+- Support for ILIAS 9
+
+### Changed
+- Added prettier CSS to forum threads
+- Increase minimum supported Python version to 3.11
+
+## Fixed
+- File links in report on Windows
+- TOTP authentication in KIT Shibboleth
+- Forum crawling only considering the first 20 entries
+
+## 3.7.0 - 2024-11-13
+
+### Added
+- Support for MOB videos in page descriptions
+- Clickable links in the report to directly open new/modified/not-deleted files
+- Support for non KIT shibboleth login
+
+### Changed
+- Remove videos from description pages
+- Perform ILIAS cycle detection after processing the transform to allow
+  ignoring duplicated elements
+- Parse headings (h1-h3) as folders in kit-ipd crawler
+
+### Fixed
+- Personal desktop/dashboard/favorites crawling
+- Crawling of nested courses
+- Downloading of links with no target URL
+- Handle row flex on description pages
+- Add `<!DOCTYPE html>` heading to forum threads to fix mime type detection
+- Handle groups in cards
+
+## 3.6.0 - 2024-10-23
+
+### Added
+- Generic `ilias-web` crawler and `ilias-web` CLI command
+- Support for the course overview page. Using this URL as a target might cause
+  duplication warnings, as subgroups are listed separately.
+- Support for named capture groups in regex transforms
+- Crawl custom item groups as folders
+
+### Fixed
+- Normalization of meeting names in cards
+- Sanitization of slashes in exercise container names
+
+## 3.5.2 - 2024-04-14
+
+### Fixed
+- Crawling of personal desktop with ILIAS 8
+- Crawling of empty personal desktops
+
+## 3.5.1 - 2024-04-09
+
+### Added
+- Support for ILIAS 8
+
+### Fixed
+- Video name deduplication
+
+## 3.5.0 - 2023-09-13
+
+### Added
+- `no-delete-prompt-override` conflict resolution strategy
+- Support for ILIAS learning modules
+- `show_not_deleted` option to stop printing the "Not Deleted" status or report
+  message. This combines nicely with the `no-delete-prompt-override` strategy,
+  causing PFERD to mostly ignore local-only files.
+- Support for mediacast video listings
+- Crawling of files in info tab
+
+### Changed
+- Remove size suffix for files in content pages
+
+### Fixed
+- Crawling of courses with the timeline view as the default tab
+- Crawling of file and custom opencast cards
+- Crawling of button cards without descriptions
+- Abort crawling when encountering an unexpected ilias root page redirect
+- Sanitize ascii control characters on Windows
+- Crawling of paginated past meetings
+- Ignore SCORM learning modules
+
 ## 3.4.3 - 2022-11-29
 
 ### Added
CONFIG.md (90 lines changed)
@@ -4,11 +4,11 @@ A config file consists of sections. A section begins with a `[section]` header,
 which is followed by a list of `key = value` pairs. Comments must be on their
 own line and start with `#`. Multiline values must be indented beyond their key.
 Boolean values can be `yes` or `no`. For more details and some examples on the
-format, see the [configparser documentation][1] ([interpolation][2] is
-disabled).
+format, see the [configparser documentation][cp-file]
+([interpolation][cp-interp] is disabled).
 
-[1]: <https://docs.python.org/3/library/configparser.html#supported-ini-file-structure> "Supported INI File Structure"
-[2]: <https://docs.python.org/3/library/configparser.html#interpolation-of-values> "Interpolation of values"
+[cp-file]: <https://docs.python.org/3/library/configparser.html#supported-ini-file-structure> "Supported INI File Structure"
+[cp-interp]: <https://docs.python.org/3/library/configparser.html#interpolation-of-values> "Interpolation of values"
 
 ## The `DEFAULT` section
 
@@ -26,6 +26,9 @@ default values for the other sections.
   `Added ...`) while running a crawler. (Default: `yes`)
 - `report`: Whether PFERD should print a report of added, changed and deleted
   local files for all crawlers before exiting. (Default: `yes`)
+- `show_not_deleted`: Whether PFERD should print messages in status and report
+  when a local-only file wasn't deleted. Combines nicely with the
+  `no-delete-prompt-override` conflict resolution strategy.
 - `share_cookies`: Whether crawlers should share cookies where applicable. For
   example, some crawlers share cookies if they crawl the same website using the
   same account. (Default: `yes`)
@@ -75,6 +78,9 @@ common to all crawlers:
     using `prompt` and always choosing "yes".
   - `no-delete`: Never delete local files, but overwrite local files if the
     remote file is different.
+  - `no-delete-prompt-overwrite`: Never delete local files, but prompt to
+    overwrite local files if the remote file is different. Combines nicely
+    with the `show_not_deleted` option.
 - `transform`: Rules for renaming and excluding certain files and directories.
   For more details, see [this section](#transformation-rules). (Default: empty)
 - `tasks`: The maximum number of concurrent tasks (such as crawling or
@@ -140,7 +146,7 @@ crawler simulate a slower, network-based crawler.
 
 This crawler crawls a KIT-IPD page by url. The root page can be crawled from
 outside the KIT network so you will be informed about any new/deleted files,
-but downloading files requires you to be within. Adding a show delay between
+but downloading files requires you to be within. Adding a short delay between
 requests is likely a good idea.
 
 - `target`: URL to a KIT-IPD page
@@ -148,6 +154,63 @@ requests is likely a good idea.
   matches, the given link is downloaded as a file. This is used to extract
   files from KIT-IPD pages. (Default: `^.*?[^/]+\.(pdf|zip|c|cpp|java)$`)
 
+### The `ilias-web` crawler
+
+This crawler crawls a generic ILIAS instance.
+
+Inspired by [this ILIAS downloader][ilias-dl], the following configurations should work
+out of the box for the corresponding universities:
+
+[ilias-dl]: https://github.com/V3lop5/ilias-downloader/blob/main/configs "ilias-downloader configs"
+
+| University    | `base_url`                              | `login_type` | `client_id`   |
+|---------------|-----------------------------------------|--------------|---------------|
+| FH Aachen     | https://www.ili.fh-aachen.de            | local        | elearning     |
+| Uni Köln      | https://www.ilias.uni-koeln.de/ilias    | local        | uk            |
+| Uni Konstanz  | https://ilias.uni-konstanz.de           | local        | ILIASKONSTANZ |
+| Uni Stuttgart | https://ilias3.uni-stuttgart.de         | local        | Uni_Stuttgart |
+| Uni Tübingen  | https://ovidius.uni-tuebingen.de/ilias3 | shibboleth   |               |
+
+If your university isn't listed, try navigating to your instance's login page.
+Assuming no custom login service is used, the URL will look something like this:
+
+```jinja
+{{ base_url }}/login.php?client_id={{ client_id }}&cmd=force_login&lang=
+```
+
+If the values work, feel free to submit a PR and add them to the table above.
+
+- `base_url`: The URL where the ILIAS instance is located. (Required)
+- `login_type`: How you authenticate. (Required)
+  - `local`: Use `client_id` for authentication.
+  - `shibboleth`: Use shibboleth for authentication.
+- `client_id`: An ID used for authentication if `login_type` is `local`. Is
+  ignored if `login_type` is `shibboleth`.
+- `target`: The ILIAS element to crawl. (Required)
+  - `desktop`: Crawl your personal desktop / dashboard
+  - `<course id>`: Crawl the course with the given id
+  - `<url>`: Crawl a given element by URL (preferably the permanent URL linked
+    at the bottom of its ILIAS page).
+    This also supports the "My Courses" overview page to download *all*
+    courses. Note that this might produce confusing local directory layouts
+    and duplication warnings if you are a member of an ILIAS group. The
+    `desktop` target is generally preferable.
+- `auth`: Name of auth section to use for login. (Required)
+- `tfa_auth`: Name of auth section to use for two-factor authentication. Only
+  uses the auth section's password. (Default: Anonymous `tfa` authenticator)
+- `links`: How to represent external links. (Default: `fancy`)
+  - `ignore`: Don't download links.
+  - `plaintext`: A text file containing only the URL.
+  - `fancy`: A HTML file looking like the ILIAS link element.
+  - `internet-shortcut`: An internet shortcut file (`.url` file).
+- `link_redirect_delay`: Time (in seconds) until `fancy` link files will
+  redirect to the actual URL. Set to a negative value to disable the automatic
+  redirect. (Default: `-1`)
+- `videos`: Whether to download videos. (Default: `no`)
+- `forums`: Whether to download forum threads. (Default: `no`)
+- `http_timeout`: The timeout (in seconds) for all HTTP requests. (Default:
+  `20.0`)
+
 ### The `kit-ilias-web` crawler
 
 This crawler crawls the KIT ILIAS instance.
@@ -226,10 +289,10 @@ is stored in the keyring.
 
 ### The `pass` authenticator
 
-This authenticator queries the [`pass` password manager][3] for a username and
-password. It tries to be mostly compatible with [browserpass][4] and
-[passff][5], so see those links for an overview of the format. If PFERD fails
-to load your password, you can use the `--explain` flag to see why.
+This authenticator queries the [`pass` password manager][pass] for a username
+and password. It tries to be mostly compatible with [browserpass][browserpass]
+and [passff][passff], so see those links for an overview of the format. If PFERD
+fails to load your password, you can use the `--explain` flag to see why.
 
 - `passname`: The name of the password to use (Required)
 - `username_prefixes`: A comma-separated list of username line prefixes
@@ -237,9 +300,9 @@ to load your password, you can use the `--explain` flag to see why.
 - `password_prefixes`: A comma-separated list of password line prefixes
   (Default: `password,pass,secret`)
 
-[3]: <https://www.passwordstore.org/> "Pass: The Standard Unix Password Manager"
-[4]: <https://github.com/browserpass/browserpass-extension#organizing-password-store> "Organizing password store"
-[5]: <https://github.com/passff/passff#multi-line-format> "Multi-line format"
+[pass]: <https://www.passwordstore.org/> "Pass: The Standard Unix Password Manager"
+[browserpass]: <https://github.com/browserpass/browserpass-extension#organizing-password-store> "Organizing password store"
+[passff]: <https://github.com/passff/passff#multi-line-format> "Multi-line format"
 
 ### The `tfa` authenticator
 
@@ -338,7 +401,8 @@ matches `SOURCE`, the output path is created using `TARGET` as template.
   be referred to as `{g<n>}` (e.g. `{g3}`). `{g0}` refers to the original path.
   If capturing group *n*'s contents are a valid integer, the integer value is
   available as `{i<n>}` (e.g. `{i3}`). If capturing group *n*'s contents are a
-  valid float, the float value is available as `{f<n>}` (e.g. `{f3}`). If a
+  valid float, the float value is available as `{f<n>}` (e.g. `{f3}`). Named capture
+  groups (e.g. `(?P<name>)`) are available by their name (e.g. `{name}`). If a
   capturing group is not present (e.g. when matching the string `cd` with the
   regex `(ab)?cd`), the corresponding variables are not defined.
 
LICENSE
5
LICENSE
@ -1,5 +1,6 @@
|
|||||||
Copyright 2019-2021 Garmelon, I-Al-Istannen, danstooamerican, pavelzw,
|
Copyright 2019-2024 Garmelon, I-Al-Istannen, danstooamerican, pavelzw,
|
||||||
TheChristophe, Scriptim, thelukasprobst, Toorero
|
TheChristophe, Scriptim, thelukasprobst, Toorero,
|
||||||
|
Mr-Pine, p-fruck, PinieP
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
@@ -47,6 +47,8 @@ def configure_logging_from_args(args: argparse.Namespace) -> None:
         log.output_explain = args.explain
     if args.status is not None:
         log.output_status = args.status
+    if args.show_not_deleted is not None:
+        log.output_not_deleted = args.show_not_deleted
     if args.report is not None:
         log.output_report = args.report
 
@@ -72,6 +74,8 @@ def configure_logging_from_config(args: argparse.Namespace, config: Config) -> N
             log.output_status = config.default_section.status()
         if args.report is None:
             log.output_report = config.default_section.report()
+        if args.show_not_deleted is None:
+            log.output_not_deleted = config.default_section.show_not_deleted()
     except ConfigOptionError as e:
         log.error(str(e))
         sys.exit(1)
@@ -1,4 +1,4 @@
-from typing import Optional, Tuple
+from typing import Optional, Tuple, cast
 
 import keyring
 
@@ -13,7 +13,7 @@ class KeyringAuthSection(AuthSection):
         return self.s.get("username")
 
     def keyring_name(self) -> str:
-        return self.s.get("keyring_name", fallback=NAME)
+        return cast(str, self.s.get("keyring_name", fallback=NAME))
 
 
 class KeyringAuthenticator(Authenticator):
@@ -8,6 +8,7 @@
 # well.
 
 from . import command_local  # noqa: F401 imported but unused
+from . import command_ilias_web  # noqa: F401 imported but unused
 from . import command_kit_ilias_web  # noqa: F401 imported but unused
 from . import command_kit_ipd  # noqa: F401 imported but unused
 from .parser import PARSER, ParserLoadError, load_default_section  # noqa: F401 imported but unused
PFERD/cli/command_ilias_web.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+import argparse
+import configparser
+
+from ..logging import log
+from .common_ilias_args import configure_common_group_args, load_common
+from .parser import CRAWLER_PARSER, SUBPARSERS, load_crawler
+
+COMMAND_NAME = "ilias-web"
+
+SUBPARSER = SUBPARSERS.add_parser(
+    COMMAND_NAME,
+    parents=[CRAWLER_PARSER],
+)
+
+GROUP = SUBPARSER.add_argument_group(
+    title=f"{COMMAND_NAME} crawler arguments",
+    description=f"arguments for the '{COMMAND_NAME}' crawler",
+)
+
+GROUP.add_argument(
+    "--base-url",
+    type=str,
+    metavar="BASE_URL",
+    help="The base url of the ilias instance"
+)
+
+GROUP.add_argument(
+    "--client-id",
+    type=str,
+    metavar="CLIENT_ID",
+    help="The client id of the ilias instance"
+)
+
+configure_common_group_args(GROUP)
+
+
+def load(
+    args: argparse.Namespace,
+    parser: configparser.ConfigParser,
+) -> None:
+    log.explain(f"Creating config for command '{COMMAND_NAME}'")
+
+    parser["crawl:ilias"] = {}
+    section = parser["crawl:ilias"]
+    load_crawler(args, section)
+
+    section["type"] = COMMAND_NAME
+    if args.ilias_url is not None:
+        section["base_url"] = args.ilias_url
+    if args.client_id is not None:
+        section["client_id"] = args.client_id
+
+    load_common(section, args, parser)
+
+
+SUBPARSER.set_defaults(command=load)
@@ -1,120 +1,37 @@
 import argparse
 import configparser
-from pathlib import Path
 
-from ..crawl.ilias.file_templates import Links
 from ..logging import log
-from .parser import (CRAWLER_PARSER, SUBPARSERS, BooleanOptionalAction, ParserLoadError, load_crawler,
-                     show_value_error)
+from .common_ilias_args import configure_common_group_args, load_common
+from .parser import CRAWLER_PARSER, SUBPARSERS, load_crawler
 
+COMMAND_NAME = "kit-ilias-web"
+
 SUBPARSER = SUBPARSERS.add_parser(
-    "kit-ilias-web",
+    COMMAND_NAME,
     parents=[CRAWLER_PARSER],
 )
 
 GROUP = SUBPARSER.add_argument_group(
-    title="kit-ilias-web crawler arguments",
-    description="arguments for the 'kit-ilias-web' crawler",
-)
-GROUP.add_argument(
-    "target",
-    type=str,
-    metavar="TARGET",
-    help="course id, 'desktop', or ILIAS URL to crawl"
-)
-GROUP.add_argument(
-    "output",
-    type=Path,
-    metavar="OUTPUT",
-    help="output directory"
-)
-GROUP.add_argument(
-    "--username", "-u",
-    type=str,
-    metavar="USERNAME",
-    help="user name for authentication"
-)
-GROUP.add_argument(
-    "--keyring",
-    action=BooleanOptionalAction,
-    help="use the system keyring to store and retrieve passwords"
-)
-GROUP.add_argument(
-    "--credential-file",
-    type=Path,
-    metavar="PATH",
-    help="read username and password from a credential file"
-)
-GROUP.add_argument(
-    "--links",
-    type=show_value_error(Links.from_string),
-    metavar="OPTION",
-    help="how to represent external links"
-)
-GROUP.add_argument(
-    "--link-redirect-delay",
-    type=int,
-    metavar="SECONDS",
-    help="time before 'fancy' links redirect to to their target (-1 to disable)"
-)
-GROUP.add_argument(
-    "--videos",
-    action=BooleanOptionalAction,
-    help="crawl and download videos"
-)
-GROUP.add_argument(
-    "--forums",
-    action=BooleanOptionalAction,
-    help="crawl and download forum posts"
-)
-GROUP.add_argument(
-    "--http-timeout", "-t",
-    type=float,
-    metavar="SECONDS",
-    help="timeout for all HTTP requests"
+    title=f"{COMMAND_NAME} crawler arguments",
+    description=f"arguments for the '{COMMAND_NAME}' crawler",
 )
 
+configure_common_group_args(GROUP)
+
+
 def load(
     args: argparse.Namespace,
     parser: configparser.ConfigParser,
 ) -> None:
-    log.explain("Creating config for command 'kit-ilias-web'")
+    log.explain(f"Creating config for command '{COMMAND_NAME}'")
 
     parser["crawl:ilias"] = {}
     section = parser["crawl:ilias"]
     load_crawler(args, section)
 
-    section["type"] = "kit-ilias-web"
-    section["target"] = str(args.target)
-    section["output_dir"] = str(args.output)
-    section["auth"] = "auth:ilias"
-    if args.links is not None:
-        section["links"] = str(args.links.value)
-    if args.link_redirect_delay is not None:
-        section["link_redirect_delay"] = str(args.link_redirect_delay)
-    if args.videos is not None:
-        section["videos"] = "yes" if args.videos else "no"
-    if args.forums is not None:
-        section["forums"] = "yes" if args.forums else "no"
-    if args.http_timeout is not None:
-        section["http_timeout"] = str(args.http_timeout)
-
-    parser["auth:ilias"] = {}
-    auth_section = parser["auth:ilias"]
-    if args.credential_file is not None:
-        if args.username is not None:
-            raise ParserLoadError("--credential-file and --username can't be used together")
-        if args.keyring:
-            raise ParserLoadError("--credential-file and --keyring can't be used together")
-        auth_section["type"] = "credential-file"
-        auth_section["path"] = str(args.credential_file)
-    elif args.keyring:
-        auth_section["type"] = "keyring"
-    else:
-        auth_section["type"] = "simple"
-    if args.username is not None:
-        auth_section["username"] = args.username
+    section["type"] = COMMAND_NAME
+    load_common(section, args, parser)
 
 
 SUBPARSER.set_defaults(command=load)
PFERD/cli/common_ilias_args.py (new file, 104 lines)
@@ -0,0 +1,104 @@
+import argparse
+import configparser
+from pathlib import Path
+
+from ..crawl.ilias.file_templates import Links
+from .parser import BooleanOptionalAction, ParserLoadError, show_value_error
+
+
+def configure_common_group_args(group: argparse._ArgumentGroup) -> None:
+    """These arguments are shared between the KIT and generic Ilias web command."""
+    group.add_argument(
+        "target",
+        type=str,
+        metavar="TARGET",
+        help="course id, 'desktop', or ILIAS URL to crawl"
+    )
+    group.add_argument(
+        "output",
+        type=Path,
+        metavar="OUTPUT",
+        help="output directory"
+    )
+    group.add_argument(
+        "--username", "-u",
+        type=str,
+        metavar="USERNAME",
+        help="user name for authentication"
+    )
+    group.add_argument(
+        "--keyring",
+        action=BooleanOptionalAction,
+        help="use the system keyring to store and retrieve passwords"
+    )
+    group.add_argument(
+        "--credential-file",
+        type=Path,
+        metavar="PATH",
+        help="read username and password from a credential file"
+    )
+    group.add_argument(
+        "--links",
+        type=show_value_error(Links.from_string),
+        metavar="OPTION",
+        help="how to represent external links"
+    )
+    group.add_argument(
+        "--link-redirect-delay",
+        type=int,
+        metavar="SECONDS",
+        help="time before 'fancy' links redirect to to their target (-1 to disable)"
+    )
+    group.add_argument(
+        "--videos",
+        action=BooleanOptionalAction,
+        help="crawl and download videos"
+    )
+    group.add_argument(
+        "--forums",
+        action=BooleanOptionalAction,
+        help="crawl and download forum posts"
+    )
+    group.add_argument(
+        "--http-timeout", "-t",
+        type=float,
+        metavar="SECONDS",
+        help="timeout for all HTTP requests"
+    )
+
+
+def load_common(
+    section: configparser.SectionProxy,
+    args: argparse.Namespace,
+    parser: configparser.ConfigParser,
+) -> None:
+    """Load common config between generic and KIT ilias web command"""
+    section["target"] = str(args.target)
+    section["output_dir"] = str(args.output)
+    section["auth"] = "auth:ilias"
+    if args.links is not None:
+        section["links"] = str(args.links.value)
+    if args.link_redirect_delay is not None:
+        section["link_redirect_delay"] = str(args.link_redirect_delay)
+    if args.videos is not None:
+        section["videos"] = "yes" if args.videos else "no"
+    if args.forums is not None:
+        section["forums"] = "yes" if args.forums else "no"
+    if args.http_timeout is not None:
+        section["http_timeout"] = str(args.http_timeout)
+
+    parser["auth:ilias"] = {}
+    auth_section = parser["auth:ilias"]
+    if args.credential_file is not None:
+        if args.username is not None:
+            raise ParserLoadError("--credential-file and --username can't be used together")
+        if args.keyring:
+            raise ParserLoadError("--credential-file and --keyring can't be used together")
+        auth_section["type"] = "credential-file"
+        auth_section["path"] = str(args.credential_file)
+    elif args.keyring:
+        auth_section["type"] = "keyring"
+    else:
+        auth_section["type"] = "simple"
+    if args.username is not None:
+        auth_section["username"] = args.username
@@ -215,6 +215,11 @@ PARSER.add_argument(
     action=BooleanOptionalAction,
     help="whether crawlers should share cookies where applicable"
 )
+PARSER.add_argument(
+    "--show-not-deleted",
+    action=BooleanOptionalAction,
+    help="print messages in status and report when PFERD did not delete a local only file"
+)
 
 
 def load_default_section(
@@ -233,6 +238,8 @@ def load_default_section(
         section["report"] = "yes" if args.report else "no"
     if args.share_cookies is not None:
         section["share_cookies"] = "yes" if args.share_cookies else "no"
+    if args.show_not_deleted is not None:
+        section["show_not_deleted"] = "yes" if args.show_not_deleted else "no"
 
 
 SUBPARSERS = PARSER.add_subparsers(title="crawlers")
@@ -82,6 +82,9 @@ class DefaultSection(Section):
     def report(self) -> bool:
         return self.s.getboolean("report", fallback=True)
 
+    def show_not_deleted(self) -> bool:
+        return self.s.getboolean("show_not_deleted", fallback=True)
+
     def share_cookies(self) -> bool:
         return self.s.getboolean("share_cookies", fallback=True)
 
@@ -4,7 +4,7 @@ from typing import Callable, Dict
 from ..auth import Authenticator
 from ..config import Config
 from .crawler import Crawler, CrawlError, CrawlerSection  # noqa: F401
-from .ilias import KitIliasWebCrawler, KitIliasWebCrawlerSection
+from .ilias import IliasWebCrawler, IliasWebCrawlerSection, KitIliasWebCrawler, KitIliasWebCrawlerSection
 from .kit_ipd_crawler import KitIpdCrawler, KitIpdCrawlerSection
 from .local_crawler import LocalCrawler, LocalCrawlerSection
 
@@ -18,6 +18,8 @@ CrawlerConstructor = Callable[[
 CRAWLERS: Dict[str, CrawlerConstructor] = {
     "local": lambda n, s, c, a:
         LocalCrawler(n, LocalCrawlerSection(s), c),
+    "ilias-web": lambda n, s, c, a:
+        IliasWebCrawler(n, IliasWebCrawlerSection(s), c, a),
     "kit-ilias-web": lambda n, s, c, a:
         KitIliasWebCrawler(n, KitIliasWebCrawlerSection(s), c, a),
     "kit-ipd": lambda n, s, c, a:
@@ -149,9 +149,7 @@ class CrawlerSection(Section):
         return self.s.getboolean("skip", fallback=False)
 
     def output_dir(self, name: str) -> Path:
-        # TODO Use removeprefix() after switching to 3.9
-        if name.startswith("crawl:"):
-            name = name[len("crawl:"):]
+        name = name.removeprefix("crawl:")
         return Path(self.s.get("output_dir", name)).expanduser()
 
     def redownload(self) -> Redownload:
@@ -258,6 +256,10 @@ class Crawler(ABC):
     def prev_report(self) -> Optional[Report]:
         return self._output_dir.prev_report
 
+    @property
+    def output_dir(self) -> OutputDirectory:
+        return self._output_dir
+
     @staticmethod
     async def gather(awaitables: Sequence[Awaitable[Any]]) -> List[Any]:
         """
@@ -290,9 +292,40 @@ class Crawler(ABC):
         log.explain("Answer: Yes")
         return CrawlToken(self._limiter, path)
 
+    def should_try_download(
+        self,
+        path: PurePath,
+        *,
+        etag_differs: Optional[bool] = None,
+        mtime: Optional[datetime] = None,
+        redownload: Optional[Redownload] = None,
+        on_conflict: Optional[OnConflict] = None,
+    ) -> bool:
+        log.explain_topic(f"Decision: Should Download {fmt_path(path)}")
+
+        if self._transformer.transform(path) is None:
+            log.explain("Answer: No (ignored)")
+            return False
+
+        should_download = self._output_dir.should_try_download(
+            path,
+            etag_differs=etag_differs,
+            mtime=mtime,
+            redownload=redownload,
+            on_conflict=on_conflict
+        )
+        if should_download:
+            log.explain("Answer: Yes")
+            return True
+        else:
+            log.explain("Answer: No")
+            return False
+
     async def download(
         self,
         path: PurePath,
+        *,
+        etag_differs: Optional[bool] = None,
         mtime: Optional[datetime] = None,
         redownload: Optional[Redownload] = None,
         on_conflict: Optional[OnConflict] = None,
@@ -307,7 +340,14 @@ class Crawler(ABC):
             log.status("[bold bright_black]", "Ignored", fmt_path(path))
             return None
 
-        fs_token = await self._output_dir.download(path, transformed_path, mtime, redownload, on_conflict)
+        fs_token = await self._output_dir.download(
+            path,
+            transformed_path,
+            etag_differs=etag_differs,
+            mtime=mtime,
+            redownload=redownload,
+            on_conflict=on_conflict
+        )
         if fs_token is None:
             log.explain("Answer: No")
             return None
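For readability, the decision that the new `should_try_download` method encodes boils down to: a path is only worth a request if the transform does not ignore it and the output directory still wants the file. The following standalone sketch mirrors that logic with plain callables; the names and the example rule are illustrative, not PFERD APIs.

```python
from pathlib import PurePath
from typing import Callable, Optional


def should_try_download(
    path: PurePath,
    transform: Callable[[PurePath], Optional[PurePath]],
    output_dir_wants_download: Callable[[PurePath], bool],
) -> bool:
    # Ignored by the transform rules: skip without ever touching the network.
    if transform(path) is None:
        return False
    # Otherwise defer to the output directory (mtime/ETag/redownload policy).
    return output_dir_wants_download(path)


# Example: ignore everything under "Recordings/", consider everything else.
decision = should_try_download(
    PurePath("Lecture/sheet01.pdf"),
    transform=lambda p: None if p.parts and p.parts[0] == "Recordings" else p,
    output_dir_wants_download=lambda p: True,
)
print(decision)  # True
```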
@@ -1,12 +1,14 @@
 import asyncio
 import http.cookies
 import ssl
+from datetime import datetime
 from pathlib import Path, PurePath
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Tuple, cast
 
 import aiohttp
 import certifi
 from aiohttp.client import ClientTimeout
+from bs4 import Tag
 
 from ..auth import Authenticator
 from ..config import Config
@@ -15,10 +17,12 @@ from ..utils import fmt_real_path
 from ..version import NAME, VERSION
 from .crawler import Crawler, CrawlerSection
 
+ETAGS_CUSTOM_REPORT_VALUE_KEY = "etags"
+
 
 class HttpCrawlerSection(CrawlerSection):
     def http_timeout(self) -> float:
-        return self.s.getfloat("http_timeout", fallback=20)
+        return self.s.getfloat("http_timeout", fallback=30)
 
 
 class HttpCrawler(Crawler):
@@ -169,6 +173,79 @@ class HttpCrawler(Crawler):
             log.warn(f"Failed to save cookies to {fmt_real_path(self._cookie_jar_path)}")
             log.warn(str(e))
 
+    @staticmethod
+    def get_folder_structure_from_heading_hierarchy(file_link: Tag, drop_h1: bool = False) -> PurePath:
+        """
+        Retrieves the hierarchy of headings associated with the give file link and constructs a folder
+        structure from them.
+
+        <h1> level headings usually only appear once and serve as the page title, so they would introduce
+        redundant nesting. To avoid this, <h1> headings are ignored via the drop_h1 parameter.
+        """
+
+        def find_associated_headings(tag: Tag, level: int) -> PurePath:
+            if level == 0 or (level == 1 and drop_h1):
+                return PurePath()
+
+            level_heading = cast(Optional[Tag], tag.find_previous(name=f"h{level}"))
+
+            if level_heading is None:
+                return find_associated_headings(tag, level - 1)
+
+            folder_name = level_heading.get_text().strip()
+            return find_associated_headings(level_heading, level - 1) / folder_name
+
+        # start at level <h3> because paragraph-level headings are usually too granular for folder names
+        return find_associated_headings(file_link, 3)
+
+    def _get_previous_etag_from_report(self, path: PurePath) -> Optional[str]:
+        """
+        If available, retrieves the entity tag for a given path which was stored in the previous report.
+        """
+        if not self._output_dir.prev_report:
+            return None
+
+        etags = self._output_dir.prev_report.get_custom_value(ETAGS_CUSTOM_REPORT_VALUE_KEY) or {}
+        return etags.get(str(path))
+
+    def _add_etag_to_report(self, path: PurePath, etag: Optional[str]) -> None:
+        """
+        Adds an entity tag for a given path to the report's custom values.
+        """
+        if not etag:
+            return
+
+        etags = self._output_dir.report.get_custom_value(ETAGS_CUSTOM_REPORT_VALUE_KEY) or {}
+        etags[str(path)] = etag
+        self._output_dir.report.add_custom_value(ETAGS_CUSTOM_REPORT_VALUE_KEY, etags)
+
+    async def _request_resource_version(self, resource_url: str) -> Tuple[Optional[str], Optional[datetime]]:
+        """
+        Requests the ETag and Last-Modified headers of a resource via a HEAD request.
+        If no entity tag / modification date can be obtained, the according value will be None.
+        """
+        try:
+            async with self.session.head(resource_url) as resp:
+                if resp.status != 200:
+                    return None, None
+
+                etag_header = resp.headers.get("ETag")
+                last_modified_header = resp.headers.get("Last-Modified")
+                last_modified = None
+
+                if last_modified_header:
+                    try:
+                        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified#directives
+                        datetime_format = "%a, %d %b %Y %H:%M:%S GMT"
+                        last_modified = datetime.strptime(last_modified_header, datetime_format)
+                    except ValueError:
+                        # last_modified remains None
+                        pass
+
+                return etag_header, last_modified
+        except aiohttp.ClientError:
+            return None, None
+
     async def run(self) -> None:
         self._request_count = 0
         self._cookie_jar = aiohttp.CookieJar()
@@ -186,7 +263,12 @@ class HttpCrawler(Crawler):
                 connect=self._http_timeout,
                 sock_connect=self._http_timeout,
                 sock_read=self._http_timeout,
-            )
+            ),
+            # See https://github.com/aio-libs/aiohttp/issues/6626
+            # Without this aiohttp will mangle the redirect header from Shibboleth, invalidating the
+            # passed signature. Shibboleth will not accept the broken signature and authentication will
+            # fail.
+            requote_redirect_url=False
         ) as session:
             self.session = session
             try:
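The `get_folder_structure_from_heading_hierarchy` helper added above walks backwards from a file link through `<h3>`, `<h2>` and `<h1>` headings to build a folder path. This self-contained sketch replays the same recursive lookup on a tiny hand-written page; the HTML and names are invented purely for illustration and are not taken from PFERD.

```python
from pathlib import PurePath
from typing import Optional, cast

import bs4

html = """
<h1>Lecture</h1>
<h2>Chapter 1</h2>
<h3>Exercises</h3>
<a id="sheet" href="sheet01.pdf">Sheet 1</a>
"""
soup = bs4.BeautifulSoup(html, "html.parser")
link = cast(bs4.Tag, soup.find("a", id="sheet"))


def folders(tag: bs4.Tag, level: int, drop_h1: bool = True) -> PurePath:
    # Walk from <h{level}> down to <h1>, prepending each heading's text as a folder.
    if level == 0 or (level == 1 and drop_h1):
        return PurePath()
    heading = cast(Optional[bs4.Tag], tag.find_previous(name=f"h{level}"))
    if heading is None:
        return folders(tag, level - 1, drop_h1)
    return folders(heading, level - 1, drop_h1) / heading.get_text().strip()


print(folders(link, 3))  # Chapter 1/Exercises  (the <h1> page title is dropped)
```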
@@ -1,3 +1,9 @@
-from .kit_ilias_web_crawler import KitIliasWebCrawler, KitIliasWebCrawlerSection
+from .kit_ilias_web_crawler import (IliasWebCrawler, IliasWebCrawlerSection, KitIliasWebCrawler,
+                                    KitIliasWebCrawlerSection)
 
-__all__ = ["KitIliasWebCrawler", "KitIliasWebCrawlerSection"]
+__all__ = [
+    "IliasWebCrawler",
+    "IliasWebCrawlerSection",
+    "KitIliasWebCrawler",
+    "KitIliasWebCrawlerSection",
+]
PFERD/crawl/ilias/async_helper.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+import asyncio
+from typing import Any, Callable, Optional
+
+import aiohttp
+
+from ...logging import log
+from ..crawler import AWrapped, CrawlError, CrawlWarning
+
+
+def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Callable[[AWrapped], AWrapped]:
+    def decorator(f: AWrapped) -> AWrapped:
+        async def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
+            last_exception: Optional[BaseException] = None
+            for round in range(attempts):
+                try:
+                    return await f(*args, **kwargs)
+                except aiohttp.ContentTypeError:  # invalid content type
+                    raise CrawlWarning("ILIAS returned an invalid content type")
+                except aiohttp.TooManyRedirects:
+                    raise CrawlWarning("Got stuck in a redirect loop")
+                except aiohttp.ClientPayloadError as e:  # encoding or not enough bytes
+                    last_exception = e
+                except aiohttp.ClientConnectionError as e:  # e.g. timeout, disconnect, resolve failed, etc.
+                    last_exception = e
+                except asyncio.exceptions.TimeoutError as e:  # explicit http timeouts in HttpCrawler
+                    last_exception = e
+                log.explain_topic(f"Retrying operation {name}. Retries left: {attempts - 1 - round}")
+                log.explain(f"Last exception: {last_exception!r}")
+
+            if last_exception:
+                message = f"Error in I/O Operation: {last_exception!r}"
+                if failure_is_error:
+                    raise CrawlError(message) from last_exception
+                else:
+                    raise CrawlWarning(message) from last_exception
+            raise CrawlError("Impossible return in ilias _iorepeat")
+
+        return wrapper  # type: ignore
+
+    return decorator
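`_iorepeat` is a decorator factory: it is called with a retry count and a label and then wraps an async callable, retrying it on transient aiohttp/asyncio errors before escalating to a `CrawlWarning` (or a `CrawlError` when `failure_is_error=True`). A minimal usage sketch, assuming a PFERD checkout that already contains this new module; the decorated class, method and its body are made up for illustration:

```python
from PFERD.crawl.ilias.async_helper import _iorepeat


class ExampleIliasTask:
    # Retries the whole coroutine up to 3 times on connection/timeout/payload
    # errors; content-type errors and redirect loops abort immediately.
    @_iorepeat(3, "downloading forum thread")
    async def download_thread(self, url: str) -> None:
        ...  # perform the actual HTTP work here
```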
@ -1,5 +1,9 @@
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Optional
|
from typing import Optional, cast
|
||||||
|
|
||||||
|
import bs4
|
||||||
|
|
||||||
|
from PFERD.utils import soupify
|
||||||
|
|
||||||
_link_template_plain = "{{link}}"
|
_link_template_plain = "{{link}}"
|
||||||
_link_template_fancy = """
|
_link_template_fancy = """
|
||||||
@ -94,6 +98,162 @@ _link_template_internet_shortcut = """
|
|||||||
URL={{link}}
|
URL={{link}}
|
||||||
""".strip()
|
""".strip()
|
||||||
|
|
||||||
|
_learning_module_template = """
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>{{name}}</title>
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<style>
|
||||||
|
* {
|
||||||
|
box-sizing: border-box;
|
||||||
|
}
|
||||||
|
.center-flex {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
.nav {
|
||||||
|
display: flex;
|
||||||
|
justify-content: space-between;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
<body class="center-flex">
|
||||||
|
{{body}}
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
"""
|
||||||
|
|
||||||
|
_forum_thread_template = """
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>ILIAS - Forum: {{name}}</title>
|
||||||
|
<style>
|
||||||
|
* {
|
||||||
|
box-sizing: border-box;
|
||||||
|
}
|
||||||
|
body {
|
||||||
|
font-family: 'Open Sans', Verdana, Arial, Helvetica, sans-serif;
|
||||||
|
padding: 8px;
|
||||||
|
}
|
||||||
|
ul, ol, p {
|
||||||
|
margin: 1.2em 0;
|
||||||
|
}
|
||||||
|
p {
|
||||||
|
margin-top: 8px;
|
||||||
|
margin-bottom: 8px;
|
||||||
|
}
|
||||||
|
a {
|
||||||
|
color: #00876c;
|
||||||
|
text-decoration: none;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
a:hover {
|
||||||
|
text-decoration: underline;
|
||||||
|
}
|
||||||
|
body > p:first-child > span:first-child {
|
||||||
|
font-size: 1.6em;
|
||||||
|
}
|
||||||
|
body > p:first-child > span:first-child ~ span.default {
|
||||||
|
display: inline-block;
|
||||||
|
font-size: 1.2em;
|
||||||
|
padding-bottom: 8px;
|
||||||
|
}
|
||||||
|
.ilFrmPostContent {
|
||||||
|
margin-top: 8px;
|
||||||
|
max-width: 64em;
|
||||||
|
}
|
||||||
|
.ilFrmPostContent > *:first-child {
|
||||||
|
margin-top: 0px;
|
||||||
|
}
|
||||||
|
.ilFrmPostTitle {
|
||||||
|
margin-top: 24px;
|
||||||
|
color: #00876c;
|
||||||
|
font-weight: bold;
|
||||||
|
}
|
||||||
|
#ilFrmPostList {
|
||||||
|
list-style: none;
|
||||||
|
padding-left: 0;
|
||||||
|
}
|
||||||
|
li.ilFrmPostRow {
|
||||||
|
padding: 3px 0 3px 3px;
|
||||||
|
margin-bottom: 24px;
|
||||||
|
border-left: 6px solid #dddddd;
|
||||||
|
}
|
||||||
|
.ilFrmPostRow > div {
|
||||||
|
display: flex;
|
||||||
|
}
|
||||||
|
.ilFrmPostImage img {
|
||||||
|
margin: 0 !important;
|
||||||
|
padding: 6px 9px 9px 6px;
|
||||||
|
}
|
||||||
|
.ilUserIcon {
|
||||||
|
width: 115px;
|
||||||
|
}
|
||||||
|
.small {
|
||||||
|
text-decoration: none;
|
||||||
|
font-size: 0.75rem;
|
||||||
|
color: #6f6f6f;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
{{heading}}
|
||||||
|
{{content}}
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
""".strip() # noqa: E501 line too long
|
||||||
|
|
||||||
|
|
||||||
|
def learning_module_template(body: bs4.Tag, name: str, prev: Optional[str], next: Optional[str]) -> str:
|
||||||
|
# Seems to be comments, ignore those.
|
||||||
|
for elem in body.select(".il-copg-mob-fullscreen-modal"):
|
||||||
|
elem.decompose()
|
||||||
|
|
||||||
|
nav_template = """
|
||||||
|
<div class="nav">
|
||||||
|
{{left}}
|
||||||
|
{{right}}
|
||||||
|
</div>
|
||||||
|
"""
|
||||||
|
if prev and body.select_one(".ilc_page_lnav_LeftNavigation"):
|
||||||
|
text = cast(bs4.Tag, body.select_one(".ilc_page_lnav_LeftNavigation")).get_text().strip()
|
||||||
|
left = f'<a href="{prev}">{text}</a>'
|
||||||
|
else:
|
||||||
|
left = "<span></span>"
|
||||||
|
|
||||||
|
if next and body.select_one(".ilc_page_rnav_RightNavigation"):
|
||||||
|
text = cast(bs4.Tag, body.select_one(".ilc_page_rnav_RightNavigation")).get_text().strip()
|
||||||
|
right = f'<a href="{next}">{text}</a>'
|
||||||
|
else:
|
||||||
|
right = "<span></span>"
|
||||||
|
|
||||||
|
if top_nav := body.select_one(".ilc_page_tnav_TopNavigation"):
|
||||||
|
top_nav.replace_with(
|
||||||
|
soupify(nav_template.replace("{{left}}", left).replace("{{right}}", right).encode())
|
||||||
|
)
|
||||||
|
|
||||||
|
if bot_nav := body.select_one(".ilc_page_bnav_BottomNavigation"):
|
||||||
|
bot_nav.replace_with(soupify(nav_template.replace(
|
||||||
|
"{{left}}", left).replace("{{right}}", right).encode())
|
||||||
|
)
|
||||||
|
|
||||||
|
body_str = cast(str, body.prettify())
|
||||||
|
return _learning_module_template.replace("{{body}}", body_str).replace("{{name}}", name)
|
||||||
|
|
||||||
|
|
||||||
|
def forum_thread_template(name: str, url: str, heading: bs4.Tag, content: bs4.Tag) -> str:
|
||||||
|
if title := cast(Optional[bs4.Tag], heading.find(name="b")):
|
||||||
|
title.wrap(bs4.Tag(name="a", attrs={"href": url}))
|
||||||
|
return _forum_thread_template \
|
||||||
|
.replace("{{name}}", name) \
|
||||||
|
.replace("{{heading}}", cast(str, heading.prettify())) \
|
||||||
|
.replace("{{content}}", cast(str, content.prettify()))
|
||||||
|
|
||||||
|
|
||||||
class Links(Enum):
    IGNORE = "ignore"

@@ -102,24 +262,24 @@ class Links(Enum):
     INTERNET_SHORTCUT = "internet-shortcut"

     def template(self) -> Optional[str]:
-        if self == self.FANCY:
+        if self == Links.FANCY:
             return _link_template_fancy
-        elif self == self.PLAINTEXT:
+        elif self == Links.PLAINTEXT:
             return _link_template_plain
-        elif self == self.INTERNET_SHORTCUT:
+        elif self == Links.INTERNET_SHORTCUT:
             return _link_template_internet_shortcut
-        elif self == self.IGNORE:
+        elif self == Links.IGNORE:
             return None
         raise ValueError("Missing switch case")

     def extension(self) -> Optional[str]:
-        if self == self.FANCY:
+        if self == Links.FANCY:
             return ".html"
-        elif self == self.PLAINTEXT:
+        elif self == Links.PLAINTEXT:
             return ".txt"
-        elif self == self.INTERNET_SHORTCUT:
+        elif self == Links.INTERNET_SHORTCUT:
             return ".url"
-        elif self == self.IGNORE:
+        elif self == Links.IGNORE:
             return None
         raise ValueError("Missing switch case")
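Each variant pairs a file template with a file extension; the crawler fills the template's placeholders when it writes the link file (see _write_link_content further down). A minimal sketch of that pairing, with a made-up link:

# Sketch only: mirrors how the crawler materializes a LINK element on disk.
links = Links.FANCY
template = links.template()      # _link_template_fancy
extension = links.extension()    # ".html"
if template is not None and extension is not None:
    file_name = "Course homepage" + extension  # hypothetical element name
    content = (
        template
        .replace("{{link}}", "https://example.org")   # hypothetical target URL
        .replace("{{name}}", "Course homepage")
        .replace("{{description}}", str(None))
        .replace("{{redirect_delay}}", str(-1))
    )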
@@ -1,3 +1,5 @@
+from typing import cast
+
 from bs4 import BeautifulSoup, Comment, Tag

 _STYLE_TAG_CONTENT = """
@@ -12,6 +14,13 @@ _STYLE_TAG_CONTENT = """
         font-weight: bold;
     }

+    .row-flex {
+        display: flex;
+    }
+    .row-flex-wrap {
+        flex-wrap: wrap;
+    }
+
     .accordion-head {
         background-color: #f5f7fa;
         padding: 0.5rem 0;
@@ -63,18 +72,18 @@ def insert_base_markup(soup: BeautifulSoup) -> BeautifulSoup:


 def clean(soup: BeautifulSoup) -> BeautifulSoup:
-    for block in soup.find_all(class_=lambda x: x in _ARTICLE_WORTHY_CLASSES):
+    for block in cast(list[Tag], soup.find_all(class_=lambda x: x in _ARTICLE_WORTHY_CLASSES)):
         block.name = "article"

-    for block in soup.find_all("h3"):
+    for block in cast(list[Tag], soup.find_all("h3")):
         block.name = "div"

-    for block in soup.find_all("h1"):
+    for block in cast(list[Tag], soup.find_all("h1")):
         block.name = "h3"

-    for block in soup.find_all(class_="ilc_va_ihcap_VAccordIHeadCap"):
+    for block in cast(list[Tag], soup.find_all(class_="ilc_va_ihcap_VAccordIHeadCap")):
         block.name = "h3"
-        block["class"] += ["accordion-head"]
+        block["class"] += ["accordion-head"]  # type: ignore

     for dummy in soup.select(".ilc_text_block_Standard.ilc_Paragraph"):
         children = list(dummy.children)
@@ -82,10 +91,15 @@ def clean(soup: BeautifulSoup) -> BeautifulSoup:
             dummy.decompose()
         if len(children) > 1:
             continue
-        if type(children[0]) == Comment:
+        if isinstance(children[0], Comment):
             dummy.decompose()

-    for hrule_imposter in soup.find_all(class_="ilc_section_Separator"):
+    # Delete video figures, as they can not be internalized anyway
+    for video in soup.select(".ilc_media_cont_MediaContainerHighlighted .ilPageVideo"):
+        if figure := video.find_parent("figure"):
+            figure.decompose()
+
+    for hrule_imposter in cast(list[Tag], soup.find_all(class_="ilc_section_Separator")):
         hrule_imposter.insert(0, soup.new_tag("hr"))

     return soup
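The two helpers are applied together when a page description is saved (see _download_description further below): the base markup is inserted first, then the soup is normalized. A small sketch with made-up input HTML:

from bs4 import BeautifulSoup

raw = BeautifulSoup(
    "<div class='ilc_va_ihcap_VAccordIHeadCap'>Week 1</div>"
    "<div class='ilc_section_Separator'></div>",
    "html.parser",
)
cleaned = clean(insert_base_markup(raw))
html = cleaned.prettify()  # accordion heading becomes an <h3>, separator gains an <hr>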
1096  PFERD/crawl/ilias/ilias_web_crawler.py  Normal file
File diff suppressed because it is too large. Load Diff

@@ -1,982 +1,37 @@
|
|||||||
import asyncio
|
from typing import Dict, Literal
|
||||||
import re
|
|
||||||
from collections.abc import Awaitable, Coroutine
|
|
||||||
from pathlib import PurePath
|
|
||||||
from typing import Any, Callable, Dict, List, Optional, Set, Union, cast
|
|
||||||
|
|
||||||
import aiohttp
|
from ...auth import Authenticator
|
||||||
import yarl
|
|
||||||
from aiohttp import hdrs
|
|
||||||
from bs4 import BeautifulSoup, Tag
|
|
||||||
|
|
||||||
from ...auth import Authenticator, TfaAuthenticator
|
|
||||||
from ...config import Config
|
from ...config import Config
|
||||||
from ...logging import ProgressBar, log
|
from .ilias_web_crawler import IliasWebCrawler, IliasWebCrawlerSection
|
||||||
from ...output_dir import FileSink, Redownload
|
from .shibboleth_login import ShibbolethLogin
|
||||||
from ...utils import fmt_path, soupify, url_set_query_param
|
|
||||||
from ..crawler import AWrapped, CrawlError, CrawlToken, CrawlWarning, DownloadToken, anoncritical
|
|
||||||
from ..http_crawler import HttpCrawler, HttpCrawlerSection
|
|
||||||
from .file_templates import Links
|
|
||||||
from .ilias_html_cleaner import clean, insert_base_markup
|
|
||||||
from .kit_ilias_html import (IliasElementType, IliasForumThread, IliasPage, IliasPageElement,
|
|
||||||
_sanitize_path_name, parse_ilias_forum_export)
|
|
||||||
|
|
||||||
TargetType = Union[str, int]
|
|
||||||
|
|
||||||
_ILIAS_URL = "https://ilias.studium.kit.edu"
|
_ILIAS_URL = "https://ilias.studium.kit.edu"
|
||||||
|
|
||||||
|
|
||||||
class KitShibbolethBackgroundLoginSuccessful():
|
class KitShibbolethBackgroundLoginSuccessful:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class KitIliasWebCrawlerSection(HttpCrawlerSection):
|
class KitIliasWebCrawlerSection(IliasWebCrawlerSection):
|
||||||
def target(self) -> TargetType:
|
def base_url(self) -> str:
|
||||||
target = self.s.get("target")
|
return _ILIAS_URL
|
||||||
if not target:
|
|
||||||
self.missing_value("target")
|
|
||||||
|
|
||||||
if re.fullmatch(r"\d+", target):
|
def login(self) -> Literal["shibboleth"]:
|
||||||
# Course id
|
return "shibboleth"
|
||||||
return int(target)
|
|
||||||
if target == "desktop":
|
|
||||||
# Full personal desktop
|
|
||||||
return target
|
|
||||||
if target.startswith(_ILIAS_URL):
|
|
||||||
# ILIAS URL
|
|
||||||
return target
|
|
||||||
|
|
||||||
self.invalid_value("target", target, "Should be <course id | desktop | kit ilias URL>")
|
|
||||||
|
|
||||||
def tfa_auth(self, authenticators: Dict[str, Authenticator]) -> Optional[Authenticator]:
|
|
||||||
value: Optional[str] = self.s.get("tfa_auth")
|
|
||||||
if value is None:
|
|
||||||
return None
|
|
||||||
auth = authenticators.get(value)
|
|
||||||
if auth is None:
|
|
||||||
self.invalid_value("tfa_auth", value, "No such auth section exists")
|
|
||||||
return auth
|
|
||||||
|
|
||||||
def links(self) -> Links:
|
|
||||||
type_str: Optional[str] = self.s.get("links")
|
|
||||||
|
|
||||||
if type_str is None:
|
|
||||||
return Links.FANCY
|
|
||||||
|
|
||||||
try:
|
|
||||||
return Links.from_string(type_str)
|
|
||||||
except ValueError as e:
|
|
||||||
self.invalid_value("links", type_str, str(e).capitalize())
|
|
||||||
|
|
||||||
def link_redirect_delay(self) -> int:
|
|
||||||
return self.s.getint("link_redirect_delay", fallback=-1)
|
|
||||||
|
|
||||||
def videos(self) -> bool:
|
|
||||||
return self.s.getboolean("videos", fallback=False)
|
|
||||||
|
|
||||||
def forums(self) -> bool:
|
|
||||||
return self.s.getboolean("forums", fallback=False)
|
|
||||||
|
|
||||||
|
|
||||||
_DIRECTORY_PAGES: Set[IliasElementType] = set([
|
class KitIliasWebCrawler(IliasWebCrawler):
|
||||||
IliasElementType.EXERCISE,
|
|
||||||
IliasElementType.EXERCISE_FILES,
|
|
||||||
IliasElementType.FOLDER,
|
|
||||||
IliasElementType.MEETING,
|
|
||||||
IliasElementType.VIDEO_FOLDER,
|
|
||||||
IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED,
|
|
||||||
])
|
|
||||||
|
|
||||||
_VIDEO_ELEMENTS: Set[IliasElementType] = set([
|
|
||||||
IliasElementType.VIDEO,
|
|
||||||
IliasElementType.VIDEO_PLAYER,
|
|
||||||
IliasElementType.VIDEO_FOLDER,
|
|
||||||
IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED,
|
|
||||||
])
|
|
||||||
|
|
||||||
|
|
||||||
def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Callable[[AWrapped], AWrapped]:
|
|
||||||
def decorator(f: AWrapped) -> AWrapped:
|
|
||||||
async def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
|
|
||||||
last_exception: Optional[BaseException] = None
|
|
||||||
for round in range(attempts):
|
|
||||||
try:
|
|
||||||
return await f(*args, **kwargs)
|
|
||||||
except aiohttp.ContentTypeError: # invalid content type
|
|
||||||
raise CrawlWarning("ILIAS returned an invalid content type")
|
|
||||||
except aiohttp.TooManyRedirects:
|
|
||||||
raise CrawlWarning("Got stuck in a redirect loop")
|
|
||||||
except aiohttp.ClientPayloadError as e: # encoding or not enough bytes
|
|
||||||
last_exception = e
|
|
||||||
except aiohttp.ClientConnectionError as e: # e.g. timeout, disconnect, resolve failed, etc.
|
|
||||||
last_exception = e
|
|
||||||
except asyncio.exceptions.TimeoutError as e: # explicit http timeouts in HttpCrawler
|
|
||||||
last_exception = e
|
|
||||||
log.explain_topic(f"Retrying operation {name}. Retries left: {attempts - 1 - round}")
|
|
||||||
|
|
||||||
if last_exception:
|
|
||||||
message = f"Error in I/O Operation: {last_exception}"
|
|
||||||
if failure_is_error:
|
|
||||||
raise CrawlError(message) from last_exception
|
|
||||||
else:
|
|
||||||
raise CrawlWarning(message) from last_exception
|
|
||||||
raise CrawlError("Impossible return in ilias _iorepeat")
|
|
||||||
|
|
||||||
return wrapper # type: ignore
|
|
||||||
return decorator
|
|
||||||
|
|
||||||
|
|
||||||
def _wrap_io_in_warning(name: str) -> Callable[[AWrapped], AWrapped]:
|
|
||||||
"""
|
|
||||||
Wraps any I/O exception in a CrawlWarning.
|
|
||||||
"""
|
|
||||||
return _iorepeat(1, name)
|
|
||||||
|
|
||||||
|
|
||||||
# Crawler control flow:
|
|
||||||
#
|
|
||||||
# crawl_desktop -+
|
|
||||||
# |
|
|
||||||
# crawl_course --+
|
|
||||||
# |
|
|
||||||
# @_io_repeat | # retries internally (before the bar)
|
|
||||||
# +- crawl_url <-+
|
|
||||||
# |
|
|
||||||
# |
|
|
||||||
# | @_wrap_io_exception # does not need to retry as children acquire bars
|
|
||||||
# +> crawl_ilias_element -+
|
|
||||||
# ^ |
|
|
||||||
# | @_io_repeat | # retries internally (before the bar)
|
|
||||||
# +- crawl_ilias_page <---+
|
|
||||||
# | |
|
|
||||||
# +> get_page | # Handles and retries authentication
|
|
||||||
# |
|
|
||||||
# @_io_repeat | # retries internally (before the bar)
|
|
||||||
# +- download_link <---+
|
|
||||||
# | |
|
|
||||||
# +> resolve_target | # Handles and retries authentication
|
|
||||||
# |
|
|
||||||
# @_io_repeat | # retries internally (before the bar)
|
|
||||||
# +- download_video <---+
|
|
||||||
# | |
|
|
||||||
# | @_io_repeat | # retries internally (before the bar)
|
|
||||||
# +- download_file <---+
|
|
||||||
# |
|
|
||||||
# +> stream_from_url # Handles and retries authentication
|
|
||||||
|
|
||||||
class KitIliasWebCrawler(HttpCrawler):
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
name: str,
|
name: str,
|
||||||
section: KitIliasWebCrawlerSection,
|
section: KitIliasWebCrawlerSection,
|
||||||
config: Config,
|
config: Config,
|
||||||
authenticators: Dict[str, Authenticator]
|
authenticators: Dict[str, Authenticator],
|
||||||
):
|
):
|
||||||
# Setting a main authenticator for cookie sharing
|
super().__init__(name, section, config, authenticators)
|
||||||
auth = section.auth(authenticators)
|
|
||||||
super().__init__(name, section, config, shared_auth=auth)
|
|
||||||
|
|
||||||
if section.tasks() > 1:
|
self._shibboleth_login = ShibbolethLogin(
|
||||||
log.warn("""
|
_ILIAS_URL,
|
||||||
Please avoid using too many parallel requests as these are the KIT ILIAS
|
self._auth,
|
||||||
instance's greatest bottleneck.
|
|
||||||
""".strip())
|
|
||||||
|
|
||||||
self._shibboleth_login = KitShibbolethLogin(
|
|
||||||
auth,
|
|
||||||
section.tfa_auth(authenticators),
|
section.tfa_auth(authenticators),
|
||||||
)
|
)
|
||||||
|
|
||||||
self._base_url = _ILIAS_URL
|
|
||||||
|
|
||||||
self._target = section.target()
|
|
||||||
self._link_file_redirect_delay = section.link_redirect_delay()
|
|
||||||
self._links = section.links()
|
|
||||||
self._videos = section.videos()
|
|
||||||
self._forums = section.forums()
|
|
||||||
self._visited_urls: Dict[str, PurePath] = dict()
|
|
||||||
|
|
||||||
async def _run(self) -> None:
|
|
||||||
if isinstance(self._target, int):
|
|
||||||
log.explain_topic(f"Inferred crawl target: Course with id {self._target}")
|
|
||||||
await self._crawl_course(self._target)
|
|
||||||
elif self._target == "desktop":
|
|
||||||
log.explain_topic("Inferred crawl target: Personal desktop")
|
|
||||||
await self._crawl_desktop()
|
|
||||||
else:
|
|
||||||
log.explain_topic(f"Inferred crawl target: URL {self._target}")
|
|
||||||
await self._crawl_url(self._target)
|
|
||||||
|
|
||||||
async def _crawl_course(self, course_id: int) -> None:
|
|
||||||
# Start crawling at the given course
|
|
||||||
root_url = url_set_query_param(
|
|
||||||
self._base_url + "/goto.php", "target", f"crs_{course_id}"
|
|
||||||
)
|
|
||||||
|
|
||||||
await self._crawl_url(root_url, expected_id=course_id)
|
|
||||||
|
|
||||||
async def _crawl_desktop(self) -> None:
|
|
||||||
appendix = r"ILIAS\PersonalDesktop\PDMainBarProvider|mm_pd_sel_items"
|
|
||||||
appendix = appendix.encode("ASCII").hex()
|
|
||||||
await self._crawl_url(self._base_url + "/gs_content.php?item=" + appendix)
|
|
||||||
|
|
||||||
async def _crawl_url(self, url: str, expected_id: Optional[int] = None) -> None:
|
|
||||||
maybe_cl = await self.crawl(PurePath("."))
|
|
||||||
if not maybe_cl:
|
|
||||||
return
|
|
||||||
cl = maybe_cl # Not mypy's fault, but explained here: https://github.com/python/mypy/issues/2608
|
|
||||||
|
|
||||||
elements: List[IliasPageElement] = []
|
|
||||||
# A list as variable redefinitions are not propagated to outer scopes
|
|
||||||
description: List[BeautifulSoup] = []
|
|
||||||
|
|
||||||
@_iorepeat(3, "crawling url")
|
|
||||||
async def gather_elements() -> None:
|
|
||||||
elements.clear()
|
|
||||||
async with cl:
|
|
||||||
next_stage_url: Optional[str] = url
|
|
||||||
current_parent = None
|
|
||||||
|
|
||||||
# Duplicated code, but the root page is special - we want to avoid fetching it twice!
|
|
||||||
while next_stage_url:
|
|
||||||
soup = await self._get_page(next_stage_url)
|
|
||||||
|
|
||||||
if current_parent is None and expected_id is not None:
|
|
||||||
perma_link_element: Tag = soup.find(id="current_perma_link")
|
|
||||||
if not perma_link_element or "crs_" not in perma_link_element.get("value"):
|
|
||||||
raise CrawlError("Invalid course id? Didn't find anything looking like a course")
|
|
||||||
|
|
||||||
log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
|
|
||||||
log.explain(f"URL: {next_stage_url}")
|
|
||||||
page = IliasPage(soup, next_stage_url, current_parent)
|
|
||||||
if next_element := page.get_next_stage_element():
|
|
||||||
current_parent = next_element
|
|
||||||
next_stage_url = next_element.url
|
|
||||||
else:
|
|
||||||
next_stage_url = None
|
|
||||||
|
|
||||||
elements.extend(page.get_child_elements())
|
|
||||||
if description_string := page.get_description():
|
|
||||||
description.append(description_string)
|
|
||||||
|
|
||||||
# Fill up our task list with the found elements
|
|
||||||
await gather_elements()
|
|
||||||
|
|
||||||
if description:
|
|
||||||
await self._download_description(PurePath("."), description[0])
|
|
||||||
|
|
||||||
elements.sort(key=lambda e: e.id())
|
|
||||||
|
|
||||||
tasks: List[Awaitable[None]] = []
|
|
||||||
for element in elements:
|
|
||||||
if handle := await self._handle_ilias_element(PurePath("."), element):
|
|
||||||
tasks.append(asyncio.create_task(handle))
|
|
||||||
|
|
||||||
# And execute them
|
|
||||||
await self.gather(tasks)
|
|
||||||
|
|
||||||
async def _handle_ilias_page(
|
|
||||||
self,
|
|
||||||
url: str,
|
|
||||||
parent: IliasPageElement,
|
|
||||||
path: PurePath,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
maybe_cl = await self.crawl(path)
|
|
||||||
if not maybe_cl:
|
|
||||||
return None
|
|
||||||
return self._crawl_ilias_page(url, parent, maybe_cl)
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
async def _crawl_ilias_page(
|
|
||||||
self,
|
|
||||||
url: str,
|
|
||||||
parent: IliasPageElement,
|
|
||||||
cl: CrawlToken,
|
|
||||||
) -> None:
|
|
||||||
elements: List[IliasPageElement] = []
|
|
||||||
# A list as variable redefinitions are not propagated to outer scopes
|
|
||||||
description: List[BeautifulSoup] = []
|
|
||||||
|
|
||||||
@_iorepeat(3, "crawling folder")
|
|
||||||
async def gather_elements() -> None:
|
|
||||||
elements.clear()
|
|
||||||
async with cl:
|
|
||||||
next_stage_url: Optional[str] = url
|
|
||||||
current_parent = parent
|
|
||||||
|
|
||||||
while next_stage_url:
|
|
||||||
soup = await self._get_page(next_stage_url)
|
|
||||||
log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
|
|
||||||
log.explain(f"URL: {next_stage_url}")
|
|
||||||
page = IliasPage(soup, next_stage_url, current_parent)
|
|
||||||
if next_element := page.get_next_stage_element():
|
|
||||||
current_parent = next_element
|
|
||||||
next_stage_url = next_element.url
|
|
||||||
else:
|
|
||||||
next_stage_url = None
|
|
||||||
|
|
||||||
elements.extend(page.get_child_elements())
|
|
||||||
if description_string := page.get_description():
|
|
||||||
description.append(description_string)
|
|
||||||
|
|
||||||
# Fill up our task list with the found elements
|
|
||||||
await gather_elements()
|
|
||||||
|
|
||||||
if description:
|
|
||||||
await self._download_description(cl.path, description[0])
|
|
||||||
|
|
||||||
elements.sort(key=lambda e: e.id())
|
|
||||||
|
|
||||||
tasks: List[Awaitable[None]] = []
|
|
||||||
for element in elements:
|
|
||||||
if handle := await self._handle_ilias_element(cl.path, element):
|
|
||||||
tasks.append(asyncio.create_task(handle))
|
|
||||||
|
|
||||||
# And execute them
|
|
||||||
await self.gather(tasks)
|
|
||||||
|
|
||||||
# These decorators only apply *to this method* and *NOT* to the returned
|
|
||||||
# awaitables!
|
|
||||||
# This method does not await the handlers but returns them instead.
|
|
||||||
# This ensures one level is handled at a time and name deduplication
|
|
||||||
# works correctly.
|
|
||||||
@anoncritical
|
|
||||||
async def _handle_ilias_element(
|
|
||||||
self,
|
|
||||||
parent_path: PurePath,
|
|
||||||
element: IliasPageElement,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
if element.url in self._visited_urls:
|
|
||||||
raise CrawlWarning(
|
|
||||||
f"Found second path to element {element.name!r} at {element.url!r}. "
|
|
||||||
+ f"First path: {fmt_path(self._visited_urls[element.url])}. "
|
|
||||||
+ f"Second path: {fmt_path(parent_path)}."
|
|
||||||
)
|
|
||||||
self._visited_urls[element.url] = parent_path
|
|
||||||
|
|
||||||
element_path = PurePath(parent_path, element.name)
|
|
||||||
|
|
||||||
if element.type in _VIDEO_ELEMENTS:
|
|
||||||
if not self._videos:
|
|
||||||
log.status(
|
|
||||||
"[bold bright_black]",
|
|
||||||
"Ignored",
|
|
||||||
fmt_path(element_path),
|
|
||||||
"[bright_black](enable with option 'videos')"
|
|
||||||
)
|
|
||||||
return None
|
|
||||||
|
|
||||||
if element.type == IliasElementType.FILE:
|
|
||||||
return await self._handle_file(element, element_path)
|
|
||||||
elif element.type == IliasElementType.FORUM:
|
|
||||||
if not self._forums:
|
|
||||||
log.status(
|
|
||||||
"[bold bright_black]",
|
|
||||||
"Ignored",
|
|
||||||
fmt_path(element_path),
|
|
||||||
"[bright_black](enable with option 'forums')"
|
|
||||||
)
|
|
||||||
return None
|
|
||||||
return await self._handle_forum(element, element_path)
|
|
||||||
elif element.type == IliasElementType.TEST:
|
|
||||||
log.status(
|
|
||||||
"[bold bright_black]",
|
|
||||||
"Ignored",
|
|
||||||
fmt_path(element_path),
|
|
||||||
"[bright_black](tests contain no relevant data)"
|
|
||||||
)
|
|
||||||
return None
|
|
||||||
elif element.type == IliasElementType.SURVEY:
|
|
||||||
log.status(
|
|
||||||
"[bold bright_black]",
|
|
||||||
"Ignored",
|
|
||||||
fmt_path(element_path),
|
|
||||||
"[bright_black](surveys contain no relevant data)"
|
|
||||||
)
|
|
||||||
return None
|
|
||||||
elif element.type == IliasElementType.LINK:
|
|
||||||
return await self._handle_link(element, element_path)
|
|
||||||
elif element.type == IliasElementType.BOOKING:
|
|
||||||
return await self._handle_booking(element, element_path)
|
|
||||||
elif element.type == IliasElementType.VIDEO:
|
|
||||||
return await self._handle_file(element, element_path)
|
|
||||||
elif element.type == IliasElementType.VIDEO_PLAYER:
|
|
||||||
return await self._handle_video(element, element_path)
|
|
||||||
elif element.type in _DIRECTORY_PAGES:
|
|
||||||
return await self._handle_ilias_page(element.url, element, element_path)
|
|
||||||
else:
|
|
||||||
# This will retry it a few times, failing everytime. It doesn't make any network
|
|
||||||
# requests, so that's fine.
|
|
||||||
raise CrawlWarning(f"Unknown element type: {element.type!r}")
|
|
||||||
|
|
||||||
async def _handle_link(
|
|
||||||
self,
|
|
||||||
element: IliasPageElement,
|
|
||||||
element_path: PurePath,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
log.explain_topic(f"Decision: Crawl Link {fmt_path(element_path)}")
|
|
||||||
log.explain(f"Links type is {self._links}")
|
|
||||||
|
|
||||||
link_template_maybe = self._links.template()
|
|
||||||
link_extension = self._links.extension()
|
|
||||||
if not link_template_maybe or not link_extension:
|
|
||||||
log.explain("Answer: No")
|
|
||||||
return None
|
|
||||||
else:
|
|
||||||
log.explain("Answer: Yes")
|
|
||||||
element_path = element_path.with_name(element_path.name + link_extension)
|
|
||||||
|
|
||||||
maybe_dl = await self.download(element_path, mtime=element.mtime)
|
|
||||||
if not maybe_dl:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return self._download_link(element, link_template_maybe, maybe_dl)
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
@_iorepeat(3, "resolving link")
|
|
||||||
async def _download_link(self, element: IliasPageElement, link_template: str, dl: DownloadToken) -> None:
|
|
||||||
async with dl as (bar, sink):
|
|
||||||
export_url = element.url.replace("cmd=calldirectlink", "cmd=exportHTML")
|
|
||||||
real_url = await self._resolve_link_target(export_url)
|
|
||||||
self._write_link_content(link_template, real_url, element.name, element.description, sink)
|
|
||||||
|
|
||||||
def _write_link_content(
|
|
||||||
self,
|
|
||||||
link_template: str,
|
|
||||||
url: str,
|
|
||||||
name: str,
|
|
||||||
description: Optional[str],
|
|
||||||
sink: FileSink,
|
|
||||||
) -> None:
|
|
||||||
content = link_template
|
|
||||||
content = content.replace("{{link}}", url)
|
|
||||||
content = content.replace("{{name}}", name)
|
|
||||||
content = content.replace("{{description}}", str(description))
|
|
||||||
content = content.replace("{{redirect_delay}}", str(self._link_file_redirect_delay))
|
|
||||||
sink.file.write(content.encode("utf-8"))
|
|
||||||
sink.done()
|
|
||||||
|
|
||||||
async def _handle_booking(
|
|
||||||
self,
|
|
||||||
element: IliasPageElement,
|
|
||||||
element_path: PurePath,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
log.explain_topic(f"Decision: Crawl Booking Link {fmt_path(element_path)}")
|
|
||||||
log.explain(f"Links type is {self._links}")
|
|
||||||
|
|
||||||
link_template_maybe = self._links.template()
|
|
||||||
link_extension = self._links.extension()
|
|
||||||
if not link_template_maybe or not link_extension:
|
|
||||||
log.explain("Answer: No")
|
|
||||||
return None
|
|
||||||
else:
|
|
||||||
log.explain("Answer: Yes")
|
|
||||||
element_path = element_path.with_name(element_path.name + link_extension)
|
|
||||||
|
|
||||||
maybe_dl = await self.download(element_path, mtime=element.mtime)
|
|
||||||
if not maybe_dl:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return self._download_booking(element, link_template_maybe, maybe_dl)
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
@_iorepeat(1, "downloading description")
|
|
||||||
async def _download_description(self, parent_path: PurePath, description: BeautifulSoup) -> None:
|
|
||||||
path = parent_path / "Description.html"
|
|
||||||
dl = await self.download(path, redownload=Redownload.ALWAYS)
|
|
||||||
if not dl:
|
|
||||||
return
|
|
||||||
|
|
||||||
async with dl as (bar, sink):
|
|
||||||
description = clean(insert_base_markup(description))
|
|
||||||
sink.file.write(description.prettify().encode("utf-8"))
|
|
||||||
sink.done()
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
@_iorepeat(3, "resolving booking")
|
|
||||||
async def _download_booking(
|
|
||||||
self,
|
|
||||||
element: IliasPageElement,
|
|
||||||
link_template: str,
|
|
||||||
dl: DownloadToken,
|
|
||||||
) -> None:
|
|
||||||
async with dl as (bar, sink):
|
|
||||||
self._write_link_content(link_template, element.url, element.name, element.description, sink)
|
|
||||||
|
|
||||||
async def _resolve_link_target(self, export_url: str) -> str:
|
|
||||||
async with self.session.get(export_url, allow_redirects=False) as resp:
|
|
||||||
# No redirect means we were authenticated
|
|
||||||
if hdrs.LOCATION not in resp.headers:
|
|
||||||
return soupify(await resp.read()).select_one("a").get("href").strip()
|
|
||||||
|
|
||||||
await self._authenticate()
|
|
||||||
|
|
||||||
async with self.session.get(export_url, allow_redirects=False) as resp:
|
|
||||||
# No redirect means we were authenticated
|
|
||||||
if hdrs.LOCATION not in resp.headers:
|
|
||||||
return soupify(await resp.read()).select_one("a").get("href").strip()
|
|
||||||
|
|
||||||
raise CrawlError("resolve_link_target failed even after authenticating")
|
|
||||||
|
|
||||||
async def _handle_video(
|
|
||||||
self,
|
|
||||||
element: IliasPageElement,
|
|
||||||
element_path: PurePath,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
# Copy old mapping as it is likely still relevant
|
|
||||||
if self.prev_report:
|
|
||||||
self.report.add_custom_value(
|
|
||||||
str(element_path),
|
|
||||||
self.prev_report.get_custom_value(str(element_path))
|
|
||||||
)
|
|
||||||
|
|
||||||
# A video might contain other videos, so let's "crawl" the video first
|
|
||||||
# to ensure rate limits apply. This must be a download as *this token*
|
|
||||||
# is re-used if the video consists of a single stream. In that case the
|
|
||||||
# file name is used and *not* the stream name the ilias html parser reported
|
|
||||||
# to ensure backwards compatibility.
|
|
||||||
maybe_dl = await self.download(element_path, mtime=element.mtime, redownload=Redownload.ALWAYS)
|
|
||||||
|
|
||||||
# If we do not want to crawl it (user filter) or we have every file
|
|
||||||
# from the cached mapping already, we can ignore this and bail
|
|
||||||
if not maybe_dl or self._all_videos_locally_present(element_path):
|
|
||||||
# Mark all existing cideos as known so they do not get deleted
|
|
||||||
# during dleanup. We "downloaded" them, just without actually making
|
|
||||||
# a network request as we assumed they did not change.
|
|
||||||
for video in self._previous_contained_videos(element_path):
|
|
||||||
await self.download(video)
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
return self._download_video(element_path, element, maybe_dl)
|
|
||||||
|
|
||||||
def _previous_contained_videos(self, video_path: PurePath) -> List[PurePath]:
|
|
||||||
if not self.prev_report:
|
|
||||||
return []
|
|
||||||
custom_value = self.prev_report.get_custom_value(str(video_path))
|
|
||||||
if not custom_value:
|
|
||||||
return []
|
|
||||||
names = cast(List[str], custom_value)
|
|
||||||
folder = video_path.parent
|
|
||||||
return [PurePath(folder, name) for name in names]
|
|
||||||
|
|
||||||
def _all_videos_locally_present(self, video_path: PurePath) -> bool:
|
|
||||||
if contained_videos := self._previous_contained_videos(video_path):
|
|
||||||
log.explain_topic(f"Checking local cache for video {video_path.name}")
|
|
||||||
all_found_locally = True
|
|
||||||
for video in contained_videos:
|
|
||||||
transformed_path = self._to_local_video_path(video)
|
|
||||||
if transformed_path:
|
|
||||||
exists_locally = self._output_dir.resolve(transformed_path).exists()
|
|
||||||
all_found_locally = all_found_locally and exists_locally
|
|
||||||
if all_found_locally:
|
|
||||||
log.explain("Found all videos locally, skipping enumeration request")
|
|
||||||
return True
|
|
||||||
log.explain("Missing at least one video, continuing with requests!")
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _to_local_video_path(self, path: PurePath) -> Optional[PurePath]:
|
|
||||||
if transformed := self._transformer.transform(path):
|
|
||||||
return self._deduplicator.fixup_path(transformed)
|
|
||||||
return None
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
@_iorepeat(3, "downloading video")
|
|
||||||
async def _download_video(
|
|
||||||
self,
|
|
||||||
original_path: PurePath,
|
|
||||||
element: IliasPageElement,
|
|
||||||
dl: DownloadToken
|
|
||||||
) -> None:
|
|
||||||
stream_elements: List[IliasPageElement] = []
|
|
||||||
async with dl as (bar, sink):
|
|
||||||
page = IliasPage(await self._get_page(element.url), element.url, element)
|
|
||||||
stream_elements = page.get_child_elements()
|
|
||||||
|
|
||||||
if len(stream_elements) > 1:
|
|
||||||
log.explain(f"Found multiple video streams for {element.name}")
|
|
||||||
else:
|
|
||||||
log.explain(f"Using single video mode for {element.name}")
|
|
||||||
stream_element = stream_elements[0]
|
|
||||||
|
|
||||||
transformed_path = self._to_local_video_path(original_path)
|
|
||||||
if not transformed_path:
|
|
||||||
raise CrawlError(f"Download returned a path but transform did not for {original_path}")
|
|
||||||
|
|
||||||
# We do not have a local cache yet
|
|
||||||
if self._output_dir.resolve(transformed_path).exists():
|
|
||||||
log.explain(f"Video for {element.name} existed locally")
|
|
||||||
else:
|
|
||||||
await self._stream_from_url(stream_element.url, sink, bar, is_video=True)
|
|
||||||
self.report.add_custom_value(str(original_path), [original_path.name])
|
|
||||||
return
|
|
||||||
|
|
||||||
contained_video_paths: List[str] = []
|
|
||||||
|
|
||||||
for stream_element in stream_elements:
|
|
||||||
video_path = original_path.parent / stream_element.name
|
|
||||||
contained_video_paths.append(str(video_path))
|
|
||||||
|
|
||||||
maybe_dl = await self.download(video_path, mtime=element.mtime, redownload=Redownload.NEVER)
|
|
||||||
if not maybe_dl:
|
|
||||||
continue
|
|
||||||
async with maybe_dl as (bar, sink):
|
|
||||||
log.explain(f"Streaming video from real url {stream_element.url}")
|
|
||||||
await self._stream_from_url(stream_element.url, sink, bar, is_video=True)
|
|
||||||
|
|
||||||
self.report.add_custom_value(str(original_path), contained_video_paths)
|
|
||||||
|
|
||||||
async def _handle_file(
|
|
||||||
self,
|
|
||||||
element: IliasPageElement,
|
|
||||||
element_path: PurePath,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
maybe_dl = await self.download(element_path, mtime=element.mtime)
|
|
||||||
if not maybe_dl:
|
|
||||||
return None
|
|
||||||
return self._download_file(element, maybe_dl)
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
@_iorepeat(3, "downloading file")
|
|
||||||
async def _download_file(self, element: IliasPageElement, dl: DownloadToken) -> None:
|
|
||||||
assert dl # The function is only reached when dl is not None
|
|
||||||
async with dl as (bar, sink):
|
|
||||||
await self._stream_from_url(element.url, sink, bar, is_video=False)
|
|
||||||
|
|
||||||
async def _stream_from_url(self, url: str, sink: FileSink, bar: ProgressBar, is_video: bool) -> None:
|
|
||||||
async def try_stream() -> bool:
|
|
||||||
async with self.session.get(url, allow_redirects=is_video) as resp:
|
|
||||||
if not is_video:
|
|
||||||
# Redirect means we weren't authenticated
|
|
||||||
if hdrs.LOCATION in resp.headers:
|
|
||||||
return False
|
|
||||||
# we wanted a video but got HTML
|
|
||||||
if is_video and "html" in resp.content_type:
|
|
||||||
return False
|
|
||||||
|
|
||||||
if resp.content_length:
|
|
||||||
bar.set_total(resp.content_length)
|
|
||||||
|
|
||||||
async for data in resp.content.iter_chunked(1024):
|
|
||||||
sink.file.write(data)
|
|
||||||
bar.advance(len(data))
|
|
||||||
|
|
||||||
sink.done()
|
|
||||||
return True
|
|
||||||
|
|
||||||
auth_id = await self._current_auth_id()
|
|
||||||
if await try_stream():
|
|
||||||
return
|
|
||||||
|
|
||||||
await self.authenticate(auth_id)
|
|
||||||
|
|
||||||
if not await try_stream():
|
|
||||||
raise CrawlError("File streaming failed after authenticate()")
|
|
||||||
|
|
||||||
async def _handle_forum(
|
|
||||||
self,
|
|
||||||
element: IliasPageElement,
|
|
||||||
element_path: PurePath,
|
|
||||||
) -> Optional[Coroutine[Any, Any, None]]:
|
|
||||||
maybe_cl = await self.crawl(element_path)
|
|
||||||
if not maybe_cl:
|
|
||||||
return None
|
|
||||||
return self._crawl_forum(element, maybe_cl)
|
|
||||||
|
|
||||||
@_iorepeat(3, "crawling forum")
|
|
||||||
@anoncritical
|
|
||||||
async def _crawl_forum(self, element: IliasPageElement, cl: CrawlToken) -> None:
|
|
||||||
elements: List[IliasForumThread] = []
|
|
||||||
|
|
||||||
async with cl:
|
|
||||||
next_stage_url = element.url
|
|
||||||
while next_stage_url:
|
|
||||||
log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
|
|
||||||
log.explain(f"URL: {next_stage_url}")
|
|
||||||
|
|
||||||
soup = await self._get_page(next_stage_url)
|
|
||||||
page = IliasPage(soup, next_stage_url, None)
|
|
||||||
|
|
||||||
if next := page.get_next_stage_element():
|
|
||||||
next_stage_url = next.url
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
|
|
||||||
download_data = page.get_download_forum_data()
|
|
||||||
if not download_data:
|
|
||||||
raise CrawlWarning("Failed to extract forum data")
|
|
||||||
if download_data.empty:
|
|
||||||
log.explain("Forum had no threads")
|
|
||||||
elements = []
|
|
||||||
return
|
|
||||||
html = await self._post_authenticated(download_data.url, download_data.form_data)
|
|
||||||
elements = parse_ilias_forum_export(soupify(html))
|
|
||||||
|
|
||||||
elements.sort(key=lambda elem: elem.title)
|
|
||||||
|
|
||||||
tasks: List[Awaitable[None]] = []
|
|
||||||
for elem in elements:
|
|
||||||
tasks.append(asyncio.create_task(self._download_forum_thread(cl.path, elem)))
|
|
||||||
|
|
||||||
# And execute them
|
|
||||||
await self.gather(tasks)
|
|
||||||
|
|
||||||
@anoncritical
|
|
||||||
@_iorepeat(3, "saving forum thread")
|
|
||||||
async def _download_forum_thread(
|
|
||||||
self,
|
|
||||||
parent_path: PurePath,
|
|
||||||
element: IliasForumThread,
|
|
||||||
) -> None:
|
|
||||||
path = parent_path / (_sanitize_path_name(element.title) + ".html")
|
|
||||||
maybe_dl = await self.download(path, mtime=element.mtime)
|
|
||||||
if not maybe_dl:
|
|
||||||
return
|
|
||||||
|
|
||||||
async with maybe_dl as (bar, sink):
|
|
||||||
content = element.title_tag.prettify()
|
|
||||||
content += element.content_tag.prettify()
|
|
||||||
sink.file.write(content.encode("utf-8"))
|
|
||||||
sink.done()
|
|
||||||
|
|
||||||
async def _get_page(self, url: str) -> BeautifulSoup:
|
|
||||||
auth_id = await self._current_auth_id()
|
|
||||||
async with self.session.get(url) as request:
|
|
||||||
soup = soupify(await request.read())
|
|
||||||
if self._is_logged_in(soup):
|
|
||||||
return soup
|
|
||||||
|
|
||||||
# We weren't authenticated, so try to do that
|
|
||||||
await self.authenticate(auth_id)
|
|
||||||
|
|
||||||
# Retry once after authenticating. If this fails, we will die.
|
|
||||||
async with self.session.get(url) as request:
|
|
||||||
soup = soupify(await request.read())
|
|
||||||
if self._is_logged_in(soup):
|
|
||||||
return soup
|
|
||||||
raise CrawlError("get_page failed even after authenticating")
|
|
||||||
|
|
||||||
async def _post_authenticated(
|
|
||||||
self,
|
|
||||||
url: str,
|
|
||||||
data: dict[str, Union[str, List[str]]]
|
|
||||||
) -> BeautifulSoup:
|
|
||||||
auth_id = await self._current_auth_id()
|
|
||||||
|
|
||||||
form_data = aiohttp.FormData()
|
|
||||||
for key, val in data.items():
|
|
||||||
form_data.add_field(key, val)
|
|
||||||
|
|
||||||
async with self.session.post(url, data=form_data(), allow_redirects=False) as request:
|
|
||||||
if request.status == 200:
|
|
||||||
return await request.read()
|
|
||||||
|
|
||||||
# We weren't authenticated, so try to do that
|
|
||||||
await self.authenticate(auth_id)
|
|
||||||
|
|
||||||
# Retry once after authenticating. If this fails, we will die.
|
|
||||||
async with self.session.post(url, data=data, allow_redirects=False) as request:
|
|
||||||
if request.status == 200:
|
|
||||||
return await request.read()
|
|
||||||
raise CrawlError("post_authenticated failed even after authenticating")
|
|
||||||
|
|
||||||
# We repeat this as the login method in shibboleth doesn't handle I/O errors.
|
|
||||||
# Shibboleth is quite reliable as well, the repeat is likely not critical here.
|
|
||||||
@ _iorepeat(3, "Login", failure_is_error=True)
|
|
||||||
async def _authenticate(self) -> None:
|
|
||||||
await self._shibboleth_login.login(self.session)
|
|
||||||
|
|
||||||
@ staticmethod
|
|
||||||
def _is_logged_in(soup: BeautifulSoup) -> bool:
|
|
||||||
# Normal ILIAS pages
|
|
||||||
mainbar: Optional[Tag] = soup.find(class_="il-maincontrols-metabar")
|
|
||||||
if mainbar is not None:
|
|
||||||
login_button = mainbar.find(attrs={"href": lambda x: x and "login.php" in x})
|
|
||||||
shib_login = soup.find(id="button_shib_login")
|
|
||||||
return not login_button and not shib_login
|
|
||||||
|
|
||||||
# Personal Desktop
|
|
||||||
if soup.find("a", attrs={"href": lambda x: x and "block_type=pditems" in x}):
|
|
||||||
return True
|
|
||||||
|
|
||||||
# Video listing embeds do not have complete ILIAS html. Try to match them by
|
|
||||||
# their video listing table
|
|
||||||
video_table = soup.find(
|
|
||||||
recursive=True,
|
|
||||||
name="table",
|
|
||||||
attrs={"id": lambda x: x is not None and x.startswith("tbl_xoct")}
|
|
||||||
)
|
|
||||||
if video_table is not None:
|
|
||||||
return True
|
|
||||||
# The individual video player wrapper page has nothing of the above.
|
|
||||||
# Match it by its playerContainer.
|
|
||||||
if soup.select_one("#playerContainer") is not None:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class KitShibbolethLogin:
|
|
||||||
"""
|
|
||||||
Login via KIT's shibboleth system.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, authenticator: Authenticator, tfa_authenticator: Optional[Authenticator]) -> None:
|
|
||||||
self._auth = authenticator
|
|
||||||
self._tfa_auth = tfa_authenticator
|
|
||||||
|
|
||||||
async def login(self, sess: aiohttp.ClientSession) -> None:
|
|
||||||
"""
|
|
||||||
Performs the ILIAS Shibboleth authentication dance and saves the login
|
|
||||||
cookies it receieves.
|
|
||||||
|
|
||||||
This function should only be called whenever it is detected that you're
|
|
||||||
not logged in. The cookies obtained should be good for a few minutes,
|
|
||||||
maybe even an hour or two.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Equivalent: Click on "Mit KIT-Account anmelden" button in
|
|
||||||
# https://ilias.studium.kit.edu/login.php
|
|
||||||
url = f"{_ILIAS_URL}/shib_login.php"
|
|
||||||
data = {
|
|
||||||
"sendLogin": "1",
|
|
||||||
"idp_selection": "https://idp.scc.kit.edu/idp/shibboleth",
|
|
||||||
"il_target": "",
|
|
||||||
"home_organization_selection": "Weiter",
|
|
||||||
}
|
|
||||||
soup: Union[BeautifulSoup, KitShibbolethBackgroundLoginSuccessful] = await _shib_post(sess, url, data)
|
|
||||||
|
|
||||||
if isinstance(soup, KitShibbolethBackgroundLoginSuccessful):
|
|
||||||
return
|
|
||||||
|
|
||||||
# Attempt to login using credentials, if necessary
|
|
||||||
while not self._login_successful(soup):
|
|
||||||
# Searching the form here so that this fails before asking for
|
|
||||||
# credentials rather than after asking.
|
|
||||||
form = soup.find("form", {"class": "full content", "method": "post"})
|
|
||||||
action = form["action"]
|
|
||||||
|
|
||||||
csrf_token = form.find("input", {"name": "csrf_token"})["value"]
|
|
||||||
|
|
||||||
# Equivalent: Enter credentials in
|
|
||||||
# https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO
|
|
||||||
url = "https://idp.scc.kit.edu" + action
|
|
||||||
username, password = await self._auth.credentials()
|
|
||||||
data = {
|
|
||||||
"_eventId_proceed": "",
|
|
||||||
"j_username": username,
|
|
||||||
"j_password": password,
|
|
||||||
"csrf_token": csrf_token
|
|
||||||
}
|
|
||||||
soup = await _post(sess, url, data)
|
|
||||||
|
|
||||||
if soup.find(id="attributeRelease"):
|
|
||||||
raise CrawlError(
|
|
||||||
"ILIAS Shibboleth entitlements changed! "
|
|
||||||
"Please log in once in your browser and review them"
|
|
||||||
)
|
|
||||||
|
|
||||||
if self._tfa_required(soup):
|
|
||||||
soup = await self._authenticate_tfa(sess, soup)
|
|
||||||
|
|
||||||
if not self._login_successful(soup):
|
|
||||||
self._auth.invalidate_credentials()
|
|
||||||
|
|
||||||
# Equivalent: Being redirected via JS automatically
|
|
||||||
# (or clicking "Continue" if you have JS disabled)
|
|
||||||
relay_state = soup.find("input", {"name": "RelayState"})
|
|
||||||
saml_response = soup.find("input", {"name": "SAMLResponse"})
|
|
||||||
url = f"{_ILIAS_URL}/Shibboleth.sso/SAML2/POST"
|
|
||||||
data = { # using the info obtained in the while loop above
|
|
||||||
"RelayState": relay_state["value"],
|
|
||||||
"SAMLResponse": saml_response["value"],
|
|
||||||
}
|
|
||||||
await sess.post(url, data=data)
|
|
||||||
|
|
||||||
async def _authenticate_tfa(
|
|
||||||
self,
|
|
||||||
session: aiohttp.ClientSession,
|
|
||||||
soup: BeautifulSoup
|
|
||||||
) -> BeautifulSoup:
|
|
||||||
if not self._tfa_auth:
|
|
||||||
self._tfa_auth = TfaAuthenticator("ilias-anon-tfa")
|
|
||||||
|
|
||||||
tfa_token = await self._tfa_auth.password()
|
|
||||||
|
|
||||||
# Searching the form here so that this fails before asking for
|
|
||||||
# credentials rather than after asking.
|
|
||||||
form = soup.find("form", {"method": "post"})
|
|
||||||
action = form["action"]
|
|
||||||
csrf_token = form.find("input", {"name": "csrf_token"})["value"]
|
|
||||||
|
|
||||||
# Equivalent: Enter token in
|
|
||||||
# https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO
|
|
||||||
url = "https://idp.scc.kit.edu" + action
|
|
||||||
data = {
|
|
||||||
"_eventId_proceed": "",
|
|
||||||
"j_tokenNumber": tfa_token,
|
|
||||||
"csrf_token": csrf_token
|
|
||||||
}
|
|
||||||
return await _post(session, url, data)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _login_successful(soup: BeautifulSoup) -> bool:
|
|
||||||
relay_state = soup.find("input", {"name": "RelayState"})
|
|
||||||
saml_response = soup.find("input", {"name": "SAMLResponse"})
|
|
||||||
return relay_state is not None and saml_response is not None
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _tfa_required(soup: BeautifulSoup) -> bool:
|
|
||||||
return soup.find(id="j_tokenNumber") is not None
|
|
||||||
|
|
||||||
|
|
||||||
async def _post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
|
|
||||||
async with session.post(url, data=data) as response:
|
|
||||||
return soupify(await response.read())
|
|
||||||
|
|
||||||
|
|
||||||
async def _shib_post(
|
|
||||||
session: aiohttp.ClientSession,
|
|
||||||
url: str,
|
|
||||||
data: Any
|
|
||||||
) -> Union[BeautifulSoup, KitShibbolethBackgroundLoginSuccessful]:
|
|
||||||
"""
|
|
||||||
aiohttp unescapes '/' and ':' in URL query parameters which is not RFC compliant and rejected
|
|
||||||
by Shibboleth. Thanks a lot. So now we unroll the requests manually, parse location headers and
|
|
||||||
build encoded URL objects ourselves... Who thought mangling location header was a good idea??
|
|
||||||
"""
|
|
||||||
log.explain_topic("Shib login POST")
|
|
||||||
async with session.post(url, data=data, allow_redirects=False) as response:
|
|
||||||
location = response.headers.get("location")
|
|
||||||
log.explain(f"Got location {location!r}")
|
|
||||||
if not location:
|
|
||||||
raise CrawlWarning(f"Login failed (1), no location header present at {url}")
|
|
||||||
correct_url = yarl.URL(location, encoded=True)
|
|
||||||
log.explain(f"Corrected location to {correct_url!r}")
|
|
||||||
|
|
||||||
if str(correct_url).startswith(_ILIAS_URL):
|
|
||||||
log.explain("ILIAS recognized our shib token and logged us in in the background, returning")
|
|
||||||
return KitShibbolethBackgroundLoginSuccessful()
|
|
||||||
|
|
||||||
async with session.get(correct_url, allow_redirects=False) as response:
|
|
||||||
location = response.headers.get("location")
|
|
||||||
log.explain(f"Redirected to {location!r} with status {response.status}")
|
|
||||||
# If shib still still has a valid session, it will directly respond to the request
|
|
||||||
if location is None:
|
|
||||||
log.explain("Shib recognized us, returning its response directly")
|
|
||||||
return soupify(await response.read())
|
|
||||||
|
|
||||||
as_yarl = yarl.URL(response.url)
|
|
||||||
# Probably not needed anymore, but might catch a few weird situations with a nicer message
|
|
||||||
if not location or not as_yarl.host:
|
|
||||||
raise CrawlWarning(f"Login failed (2), no location header present at {correct_url}")
|
|
||||||
|
|
||||||
correct_url = yarl.URL.build(
|
|
||||||
scheme=as_yarl.scheme,
|
|
||||||
host=as_yarl.host,
|
|
||||||
path=location,
|
|
||||||
encoded=True
|
|
||||||
)
|
|
||||||
log.explain(f"Corrected location to {correct_url!r}")
|
|
||||||
|
|
||||||
async with session.get(correct_url, allow_redirects=False) as response:
|
|
||||||
return soupify(await response.read())
|
|
||||||
|
129  PFERD/crawl/ilias/shibboleth_login.py  Normal file
@@ -0,0 +1,129 @@
from typing import Any, Optional, cast

import aiohttp
import yarl
from bs4 import BeautifulSoup, Tag

from ...auth import Authenticator, TfaAuthenticator
from ...logging import log
from ...utils import soupify
from ..crawler import CrawlError


class ShibbolethLogin:
    """
    Login via the Shibboleth system.
    """

    def __init__(
        self, ilias_url: str, authenticator: Authenticator, tfa_authenticator: Optional[Authenticator]
    ) -> None:
        self._ilias_url = ilias_url
        self._auth = authenticator
        self._tfa_auth = tfa_authenticator

    async def login(self, sess: aiohttp.ClientSession) -> None:
        """
        Performs the ILIAS Shibboleth authentication dance and saves the login
        cookies it receives.

        This function should only be called whenever it is detected that you're
        not logged in. The cookies obtained should be good for a few minutes,
        maybe even an hour or two.
        """
        # Equivalent: Click on "Mit KIT-Account anmelden" button in
        # https://ilias.studium.kit.edu/login.php
        url = f"{self._ilias_url}/shib_login.php"
        async with sess.get(url) as response:
            shib_url = response.url
            if str(shib_url).startswith(self._ilias_url):
                log.explain(
                    "ILIAS recognized our shib token and logged us in in the background, returning"
                )
                return
            soup: BeautifulSoup = soupify(await response.read())

        # Attempt to login using credentials, if necessary
        while not self._login_successful(soup):
            # Searching the form here so that this fails before asking for
            # credentials rather than after asking.
            form = cast(Tag, soup.find("form", {"method": "post"}))
            action = cast(str, form["action"])

            # Equivalent: Enter credentials in
            # https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO
            url = str(shib_url.origin()) + action
            username, password = await self._auth.credentials()
            data = {
                "_eventId_proceed": "",
                "j_username": username,
                "j_password": password,
                "fudis_web_authn_assertion_input": "",
            }
            if csrf_token_input := form.find("input", {"name": "csrf_token"}):
                data["csrf_token"] = csrf_token_input["value"]  # type: ignore
            soup = await _post(sess, url, data)

            if soup.find(id="attributeRelease"):
                raise CrawlError(
                    "ILIAS Shibboleth entitlements changed! "
                    "Please log in once in your browser and review them"
                )

            if self._tfa_required(soup):
                soup = await self._authenticate_tfa(sess, soup, shib_url)

        if not self._login_successful(soup):
            self._auth.invalidate_credentials()

        # Equivalent: Being redirected via JS automatically
        # (or clicking "Continue" if you have JS disabled)
        relay_state = cast(Tag, soup.find("input", {"name": "RelayState"}))
        saml_response = cast(Tag, soup.find("input", {"name": "SAMLResponse"}))
        url = soup.find("form", {"method": "post"})["action"]  # type: ignore
        data = {  # using the info obtained in the while loop above
            "RelayState": cast(str, relay_state["value"]),
            "SAMLResponse": cast(str, saml_response["value"]),
        }
        await sess.post(cast(str, url), data=data)

    async def _authenticate_tfa(
        self, session: aiohttp.ClientSession, soup: BeautifulSoup, shib_url: yarl.URL
    ) -> BeautifulSoup:
        if not self._tfa_auth:
            self._tfa_auth = TfaAuthenticator("ilias-anon-tfa")

        tfa_token = await self._tfa_auth.password()

        # Searching the form here so that this fails before asking for
        # credentials rather than after asking.
        form = cast(Tag, soup.find("form", {"method": "post"}))
        action = cast(str, form["action"])

        # Equivalent: Enter token in
        # https://idp.scc.kit.edu/idp/profile/SAML2/Redirect/SSO
        url = str(shib_url.origin()) + action
        username, password = await self._auth.credentials()
        data = {
            "_eventId_proceed": "",
            "fudis_otp_input": tfa_token,
        }
        if csrf_token_input := form.find("input", {"name": "csrf_token"}):
            data["csrf_token"] = csrf_token_input["value"]  # type: ignore
        return await _post(session, url, data)

    @staticmethod
    def _login_successful(soup: BeautifulSoup) -> bool:
        relay_state = soup.find("input", {"name": "RelayState"})
        saml_response = soup.find("input", {"name": "SAMLResponse"})
        return relay_state is not None and saml_response is not None

    @staticmethod
    def _tfa_required(soup: BeautifulSoup) -> bool:
        return soup.find(id="fudiscr-form") is not None


async def _post(session: aiohttp.ClientSession, url: str, data: Any) -> BeautifulSoup:
    async with session.post(url, data=data) as response:
        return soupify(await response.read())
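For reference, the KIT crawler in this diff constructs the class with its own base URL and authenticators, and calls login whenever a fetched page shows it is logged out. A sketch of that wiring; the auth and tfa_auth objects are assumed to exist:

# Sketch: how KitIliasWebCrawler above wires this class up.
shib = ShibbolethLogin(
    "https://ilias.studium.kit.edu",  # _ILIAS_URL in kit_ilias_web_crawler.py
    auth,                             # Authenticator with the KIT credentials (assumed)
    tfa_auth,                         # optional TFA Authenticator, may be None (assumed)
)
# Later, inside the crawler, whenever a page looks logged out:
#     await shib.login(session)      # session: aiohttp.ClientSession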
@@ -1,8 +1,9 @@
 import os
 import re
 from dataclasses import dataclass
+from datetime import datetime
 from pathlib import PurePath
-from typing import Awaitable, List, Optional, Pattern, Set, Tuple, Union
+from typing import Any, Awaitable, Generator, Iterable, List, Optional, Pattern, Tuple, Union, cast
 from urllib.parse import urljoin

 from bs4 import BeautifulSoup, Tag
@@ -31,24 +32,24 @@ class KitIpdCrawlerSection(HttpCrawlerSection):
         return re.compile(regex)


-@dataclass(unsafe_hash=True)
+@dataclass
 class KitIpdFile:
     name: str
     url: str

+    def explain(self) -> None:
+        log.explain(f"File {self.name!r} (href={self.url!r})")
+

 @dataclass
 class KitIpdFolder:
     name: str
-    files: List[KitIpdFile]
+    entries: List[Union[KitIpdFile, "KitIpdFolder"]]

     def explain(self) -> None:
         log.explain_topic(f"Folder {self.name!r}")
-        for file in self.files:
-            log.explain(f"File {file.name!r} (href={file.url!r})")
-
-    def __hash__(self) -> int:
-        return self.name.__hash__()
+        for entry in self.entries:
+            entry.explain()


 class KitIpdCrawler(HttpCrawler):
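The folder dataclass is now recursive: an entry can be a file or another folder, which is what lets _fetch_items below build a tree from the page's heading hierarchy. A sketch with made-up names:

# Sketch only: hypothetical file names and URLs.
tree = KitIpdFolder(".", [
    KitIpdFile("organisation.pdf", "https://example.org/organisation.pdf"),
    KitIpdFolder("Exercises", [
        KitIpdFile("sheet01.pdf", "https://example.org/sheet01.pdf"),
    ]),
])
tree.explain()  # logs the folder, then each entry (recursing into subfolders)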
@@ -72,81 +73,96 @@ class KitIpdCrawler(HttpCrawler):

         async with maybe_cl:
             for item in await self._fetch_items():
+                item.explain()
                 if isinstance(item, KitIpdFolder):
-                    tasks.append(self._crawl_folder(item))
+                    tasks.append(self._crawl_folder(PurePath("."), item))
                 else:
-                    # Orphan files are placed in the root folder
-                    tasks.append(self._download_file(PurePath("."), item))
+                    log.explain_topic(f"Orphan file {item.name!r} (href={item.url!r})")
+                    log.explain("Attributing it to root folder")
+                    # do this here to at least be sequential and not parallel (rate limiting is hard, as the
+                    # crawl abstraction does not hold for these requests)
+                    etag, mtime = await self._request_resource_version(item.url)
+                    tasks.append(self._download_file(PurePath("."), item, etag, mtime))

         await self.gather(tasks)

-    async def _crawl_folder(self, folder: KitIpdFolder) -> None:
-        path = PurePath(folder.name)
+    async def _crawl_folder(self, parent: PurePath, folder: KitIpdFolder) -> None:
+        path = parent / folder.name
         if not await self.crawl(path):
             return

-        tasks = [self._download_file(path, file) for file in folder.files]
+        tasks = []
+        for entry in folder.entries:
+            if isinstance(entry, KitIpdFolder):
+                tasks.append(self._crawl_folder(path, entry))
+            else:
+                # do this here to at least be sequential and not parallel (rate limiting is hard, as the crawl
+                # abstraction does not hold for these requests)
+                etag, mtime = await self._request_resource_version(entry.url)
+                tasks.append(self._download_file(path, entry, etag, mtime))

         await self.gather(tasks)

-    async def _download_file(self, parent: PurePath, file: KitIpdFile) -> None:
+    async def _download_file(
+        self,
+        parent: PurePath,
+        file: KitIpdFile,
+        etag: Optional[str],
+        mtime: Optional[datetime]
+    ) -> None:
         element_path = parent / file.name
-        maybe_dl = await self.download(element_path)
+
+        prev_etag = self._get_previous_etag_from_report(element_path)
+        etag_differs = None if prev_etag is None else prev_etag != etag
+
+        maybe_dl = await self.download(element_path, etag_differs=etag_differs, mtime=mtime)
         if not maybe_dl:
+            # keep storing the known file's etag
+            if prev_etag:
+                self._add_etag_to_report(element_path, prev_etag)
             return

         async with maybe_dl as (bar, sink):
-            await self._stream_from_url(file.url, sink, bar)
+            await self._stream_from_url(file.url, element_path, sink, bar)

-    async def _fetch_items(self) -> Set[Union[KitIpdFile, KitIpdFolder]]:
+    async def _fetch_items(self) -> Iterable[Union[KitIpdFile, KitIpdFolder]]:
         page, url = await self.get_page()
         elements: List[Tag] = self._find_file_links(page)
-        items: Set[Union[KitIpdFile, KitIpdFolder]] = set()

+        # do not add unnecessary nesting for a single <h1> heading
+        drop_h1: bool = len(page.find_all(name="h1")) <= 1
+
+        folder_tree: KitIpdFolder = KitIpdFolder(".", [])
         for element in elements:
-            folder_label = self._find_folder_label(element)
-            if folder_label:
-                folder = self._extract_folder(folder_label, url)
-                if folder not in items:
-                    items.add(folder)
-                    folder.explain()
-            else:
-                file = self._extract_file(element, url)
-                items.add(file)
-                log.explain_topic(f"Orphan file {file.name!r} (href={file.url!r})")
-                log.explain("Attributing it to root folder")
+            parent = HttpCrawler.get_folder_structure_from_heading_hierarchy(element, drop_h1)
+            file = self._extract_file(element, url)

-        return items
+            current_folder: KitIpdFolder = folder_tree
+            for folder_name in parent.parts:
+                # helps the type checker to verify that current_folder is indeed a folder
+                def subfolders() -> Generator[KitIpdFolder, Any, None]:
+                    return (entry for entry in current_folder.entries if isinstance(entry, KitIpdFolder))

-    def _extract_folder(self, folder_tag: Tag, url: str) -> KitIpdFolder:
-        files: List[KitIpdFile] = []
-        name = folder_tag.getText().strip()
-
-        container: Tag = folder_tag.findNextSibling(name="table")
-        for link in self._find_file_links(container):
-            files.append(self._extract_file(link, url))
-
-        return KitIpdFolder(name, files)
+                if not any(entry.name == folder_name for entry in subfolders()):
+                    current_folder.entries.append(KitIpdFolder(folder_name, []))
+                current_folder = next(entry for entry in subfolders() if entry.name == folder_name)
+
+            current_folder.entries.append(file)
+
+        return folder_tree.entries

-    @staticmethod
-    def _find_folder_label(file_link: Tag) -> Optional[Tag]:
-        enclosing_table: Tag = file_link.findParent(name="table")
-        if enclosing_table is None:
-            return None
-        return enclosing_table.findPreviousSibling(name=re.compile("^h[1-6]$"))

     def _extract_file(self, link: Tag, url: str) -> KitIpdFile:
         url = self._abs_url_from_link(url, link)
         name = os.path.basename(url)
         return KitIpdFile(name, url)

-    def _find_file_links(self, tag: Union[Tag, BeautifulSoup]) -> List[Tag]:
-        return tag.findAll(name="a", attrs={"href": self._file_regex})
+    def _find_file_links(self, tag: Union[Tag, BeautifulSoup]) -> list[Tag]:
+        return cast(list[Tag], tag.find_all(name="a", attrs={"href": self._file_regex}))

     def _abs_url_from_link(self, url: str, link_tag: Tag) -> str:
-        return urljoin(url, link_tag.get("href"))
+        return urljoin(url, cast(str, link_tag.get("href")))

-    async def _stream_from_url(self, url: str, sink: FileSink, bar: ProgressBar) -> None:
+    async def _stream_from_url(self, url: str, path: PurePath, sink: FileSink, bar: ProgressBar) -> None:
         async with self.session.get(url, allow_redirects=False) as resp:
             if resp.status == 403:
                 raise CrawlError("Received a 403. Are you within the KIT network/VPN?")
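The new `_fetch_items` builds a nested folder tree from each file's heading hierarchy instead of collecting a flat set. The standalone sketch below illustrates the same insertion logic with simplified dataclasses; the types and the hard-coded path are illustrative assumptions, not the actual PFERD classes or the real `get_folder_structure_from_heading_hierarchy` helper.

```python
from dataclasses import dataclass, field
from pathlib import PurePath
from typing import List, Union


@dataclass
class File:
    name: str


@dataclass
class Folder:
    name: str
    entries: List[Union[File, "Folder"]] = field(default_factory=list)


def insert(tree: Folder, parent: PurePath, file: File) -> None:
    """Walk (or create) one subfolder per path part, then attach the file."""
    current = tree
    for part in parent.parts:
        subfolder = next(
            (e for e in current.entries if isinstance(e, Folder) and e.name == part),
            None,
        )
        if subfolder is None:
            subfolder = Folder(part)
            current.entries.append(subfolder)
        current = subfolder
    current.entries.append(file)


tree = Folder(".")
insert(tree, PurePath("Lectures/Chapter 1"), File("slides.pdf"))
insert(tree, PurePath("Lectures/Chapter 1"), File("notes.pdf"))
print(tree)
```

Files that share a heading path end up in the same subfolder, which is exactly what lets the crawler later recurse with `_crawl_folder`.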
@@ -159,6 +175,8 @@ class KitIpdCrawler(HttpCrawler):

             sink.done()

+        self._add_etag_to_report(path, resp.headers.get("ETag"))
+
     async def get_page(self) -> Tuple[BeautifulSoup, str]:
         async with self.session.get(self._url) as request:
            # The web page for Algorithmen für Routenplanung contains some
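Both orphan files and folder entries now get their resource version checked via `_request_resource_version` before the download decision, and the ETag seen while streaming is stored in the report afterwards. That helper's implementation is not part of this diff; the sketch below only shows, as an assumption, one way an ETag and Last-Modified timestamp could be read with aiohttp.

```python
import asyncio
from datetime import datetime
from email.utils import parsedate_to_datetime
from typing import Optional, Tuple

import aiohttp


async def fetch_resource_version(url: str) -> Tuple[Optional[str], Optional[datetime]]:
    """Issue a HEAD request and read the ETag and Last-Modified headers, if present."""
    async with aiohttp.ClientSession() as session:
        async with session.head(url, allow_redirects=True) as resp:
            etag = resp.headers.get("ETag")
            last_modified = resp.headers.get("Last-Modified")
            mtime = parsedate_to_datetime(last_modified) if last_modified else None
            return etag, mtime


# Example with a hypothetical URL:
# asyncio.run(fetch_resource_version("https://example.org/slides.pdf"))
```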
@@ -14,7 +14,7 @@ def name_variants(path: PurePath) -> Iterator[PurePath]:


 class Deduplicator:
-    FORBIDDEN_CHARS = '<>:"/\\|?*'
+    FORBIDDEN_CHARS = '<>:"/\\|?*' + "".join([chr(i) for i in range(0, 32)])
     FORBIDDEN_NAMES = {
         "CON", "PRN", "AUX", "NUL",
         "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
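`FORBIDDEN_CHARS` now also covers the ASCII control characters (code points 0 to 31). The diff does not show how the Deduplicator applies this set, so the following is only a minimal, assumption-level sketch of filtering such characters out of a file name:

```python
FORBIDDEN_CHARS = '<>:"/\\|?*' + "".join(chr(i) for i in range(0, 32))


def strip_forbidden_chars(name: str) -> str:
    """Drop characters that are not allowed in file names on common file systems."""
    return "".join(c for c in name if c not in FORBIDDEN_CHARS)


print(strip_forbidden_chars("exercise\x07sheet?.pdf"))  # -> 'exercisesheet.pdf'
```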
@@ -1,9 +1,8 @@
 import asyncio
 import sys
 import traceback
-from contextlib import asynccontextmanager, contextmanager
-# TODO In Python 3.9 and above, ContextManager is deprecated
-from typing import AsyncIterator, ContextManager, Iterator, List, Optional
+from contextlib import AbstractContextManager, asynccontextmanager, contextmanager
+from typing import AsyncIterator, Iterator, List, Optional

 from rich.console import Console, Group
 from rich.live import Live
@@ -59,6 +58,7 @@ class Log:
         # Whether different parts of the output are enabled or disabled
         self.output_explain = False
         self.output_status = True
+        self.output_not_deleted = True
         self.output_report = True

     def _update_live(self) -> None:
@@ -207,6 +207,17 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
             action = escape(f"{action:<{self.STATUS_WIDTH}}")
             self.print(f"{style}{action}[/] {escape(text)} {suffix}")

+    def not_deleted(self, style: str, action: str, text: str, suffix: str = "") -> None:
+        """
+        Print a message for a local only file that wasn't
+        deleted while crawling. Allows markup in the "style"
+        argument which will be applied to the "action" string.
+        """
+
+        if self.output_status and self.output_not_deleted:
+            action = escape(f"{action:<{self.STATUS_WIDTH}}")
+            self.print(f"{style}{action}[/] {escape(text)} {suffix}")
+
     def report(self, text: str) -> None:
         """
         Print a report after crawling. Allows markup.
@@ -215,6 +226,14 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
         if self.output_report:
             self.print(text)

+    def report_not_deleted(self, text: str) -> None:
+        """
+        Print a report for a local only file that wasn't deleted after crawling. Allows markup.
+        """
+
+        if self.output_report and self.output_not_deleted:
+            self.print(text)
+
     @contextmanager
     def _bar(
         self,
@@ -241,7 +260,7 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
         action: str,
         text: str,
         total: Optional[float] = None,
-    ) -> ContextManager[ProgressBar]:
+    ) -> AbstractContextManager[ProgressBar]:
         """
         Allows markup in the "style" argument which will be applied to the
         "action" string.
@@ -257,7 +276,7 @@ directly or as a GitHub issue: https://github.com/Garmelon/PFERD/issues/new
         action: str,
         text: str,
         total: Optional[float] = None,
-    ) -> ContextManager[ProgressBar]:
+    ) -> AbstractContextManager[ProgressBar]:
         """
         Allows markup in the "style" argument which will be applied to the
         "action" string.
@@ -44,6 +44,7 @@ class OnConflict(Enum):
     LOCAL_FIRST = "local-first"
     REMOTE_FIRST = "remote-first"
     NO_DELETE = "no-delete"
+    NO_DELETE_PROMPT_OVERWRITE = "no-delete-prompt-overwrite"

     @staticmethod
     def from_string(string: str) -> "OnConflict":
@@ -51,11 +52,12 @@ class OnConflict(Enum):
             return OnConflict(string)
         except ValueError:
             raise ValueError("must be one of 'prompt', 'local-first',"
-                             " 'remote-first', 'no-delete'")
+                             " 'remote-first', 'no-delete', 'no-delete-prompt-overwrite'")


 @dataclass
 class Heuristics:
+    etag_differs: Optional[bool]
     mtime: Optional[datetime]

@@ -232,8 +234,16 @@ class OutputDirectory:

         remote_newer = None

+        # ETag should be a more reliable indicator than mtime, so we check it first
+        if heuristics.etag_differs is not None:
+            remote_newer = heuristics.etag_differs
+            if remote_newer:
+                log.explain("Remote file's entity tag differs")
+            else:
+                log.explain("Remote file's entity tag is the same")
+
         # Python on Windows crashes when faced with timestamps around the unix epoch
-        if heuristics.mtime and (os.name != "nt" or heuristics.mtime.year > 1970):
+        if remote_newer is None and heuristics.mtime and (os.name != "nt" or heuristics.mtime.year > 1970):
             mtime = heuristics.mtime
             remote_newer = mtime.timestamp() > stat.st_mtime
             if remote_newer:
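The redownload heuristic above now consults the ETag first and only falls back to the modification time when no ETag comparison is available. Below is a standalone sketch of that decision order; it is a simplification and not the full `_should_download` logic, which also weighs the redownload and conflict policies.

```python
from datetime import datetime, timezone
from typing import Optional


def remote_looks_newer(
    etag_differs: Optional[bool],
    remote_mtime: Optional[datetime],
    local_mtime: datetime,
) -> Optional[bool]:
    """Return True/False if a heuristic applies, None if neither does."""
    # The entity tag is the more reliable signal, so it wins whenever it is known.
    if etag_differs is not None:
        return etag_differs
    # Otherwise fall back to comparing modification times.
    if remote_mtime is not None:
        return remote_mtime > local_mtime
    return None


now = datetime.now(timezone.utc)
print(remote_looks_newer(False, now, now))  # False: ETag matches, mtime is not consulted
print(remote_looks_newer(None, now, now))   # False: no ETag info, falls back to mtime
```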
@@ -264,7 +274,7 @@ class OutputDirectory:
         on_conflict: OnConflict,
         path: PurePath,
     ) -> bool:
-        if on_conflict == OnConflict.PROMPT:
+        if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
             async with log.exclusive_output():
                 prompt = f"Replace {fmt_path(path)} with remote file?"
                 return await prompt_yes_no(prompt, default=False)
@@ -283,7 +293,7 @@ class OutputDirectory:
         on_conflict: OnConflict,
         path: PurePath,
     ) -> bool:
-        if on_conflict == OnConflict.PROMPT:
+        if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
             async with log.exclusive_output():
                 prompt = f"Recursively delete {fmt_path(path)} and replace with remote file?"
                 return await prompt_yes_no(prompt, default=False)
@@ -303,7 +313,7 @@ class OutputDirectory:
         path: PurePath,
         parent: PurePath,
     ) -> bool:
-        if on_conflict == OnConflict.PROMPT:
+        if on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
             async with log.exclusive_output():
                 prompt = f"Delete {fmt_path(parent)} so remote file {fmt_path(path)} can be downloaded?"
                 return await prompt_yes_no(prompt, default=False)
@@ -330,7 +340,7 @@ class OutputDirectory:
             return False
         elif on_conflict == OnConflict.REMOTE_FIRST:
             return True
-        elif on_conflict == OnConflict.NO_DELETE:
+        elif on_conflict in {OnConflict.NO_DELETE, OnConflict.NO_DELETE_PROMPT_OVERWRITE}:
             return False

         # This should never be reached
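Taken together, the hunks above make the new `no-delete-prompt-overwrite` mode behave like `prompt` wherever an existing local file would be overwritten, while behaving like `no-delete` when a local-only file would otherwise be deleted. A condensed sketch of that policy; the helper names are hypothetical and only mirror what the diff shows, not PFERD's actual call sites.

```python
from enum import Enum


class OnConflict(Enum):
    PROMPT = "prompt"
    LOCAL_FIRST = "local-first"
    REMOTE_FIRST = "remote-first"
    NO_DELETE = "no-delete"
    NO_DELETE_PROMPT_OVERWRITE = "no-delete-prompt-overwrite"


def prompts_before_overwriting(on_conflict: OnConflict) -> bool:
    """Mirrors the overwrite prompts above: the new mode asks, just like 'prompt'."""
    return on_conflict in {OnConflict.PROMPT, OnConflict.NO_DELETE_PROMPT_OVERWRITE}


def keeps_local_only_files(on_conflict: OnConflict) -> bool:
    """Mirrors the deletion hunk above: the new mode never deletes, like 'no-delete'."""
    return on_conflict in {OnConflict.NO_DELETE, OnConflict.NO_DELETE_PROMPT_OVERWRITE}


assert prompts_before_overwriting(OnConflict.NO_DELETE_PROMPT_OVERWRITE)
assert keeps_local_only_files(OnConflict.NO_DELETE_PROMPT_OVERWRITE)
```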
@@ -361,10 +371,28 @@ class OutputDirectory:

             raise OutputDirError("Failed to create temporary file")

+    def should_try_download(
+        self,
+        path: PurePath,
+        *,
+        etag_differs: Optional[bool] = None,
+        mtime: Optional[datetime] = None,
+        redownload: Optional[Redownload] = None,
+        on_conflict: Optional[OnConflict] = None,
+    ) -> bool:
+        heuristics = Heuristics(etag_differs, mtime)
+        redownload = self._redownload if redownload is None else redownload
+        on_conflict = self._on_conflict if on_conflict is None else on_conflict
+        local_path = self.resolve(path)
+
+        return self._should_download(local_path, heuristics, redownload, on_conflict)
+
     async def download(
         self,
         remote_path: PurePath,
         path: PurePath,
+        *,
+        etag_differs: Optional[bool] = None,
         mtime: Optional[datetime] = None,
         redownload: Optional[Redownload] = None,
         on_conflict: Optional[OnConflict] = None,
@@ -374,7 +402,7 @@ class OutputDirectory:
         MarkConflictError.
         """

-        heuristics = Heuristics(mtime)
+        heuristics = Heuristics(etag_differs, mtime)
         redownload = self._redownload if redownload is None else redownload
         on_conflict = self._on_conflict if on_conflict is None else on_conflict
         local_path = self.resolve(path)
@@ -495,7 +523,7 @@ class OutputDirectory:
             except OSError:
                 pass
             else:
-                log.status("[bold bright_magenta]", "Not deleted", fmt_path(pure))
+                log.not_deleted("[bold bright_magenta]", "Not deleted", fmt_path(pure))
                 self._report.not_delete_file(pure)

     def load_prev_report(self) -> None:
@@ -1,4 +1,4 @@
-from pathlib import Path
+from pathlib import Path, PurePath
 from typing import Dict, List, Optional

 from rich.markup import escape
@@ -168,19 +168,24 @@ class Pferd:
         log.report("")
         log.report(f"[bold bright_cyan]Report[/] for {escape(name)}")

+        def fmt_path_link(relative_path: PurePath) -> str:
+            # We need to URL-encode the path because it might contain spaces or special characters
+            link = crawler.output_dir.resolve(relative_path).absolute().as_uri()
+            return f"[link={link}]{fmt_path(relative_path)}[/link]"
+
         something_changed = False
         for path in sorted(crawler.report.added_files):
             something_changed = True
-            log.report(f"  [bold bright_green]Added[/] {fmt_path(path)}")
+            log.report(f"  [bold bright_green]Added[/] {fmt_path_link(path)}")
         for path in sorted(crawler.report.changed_files):
             something_changed = True
-            log.report(f"  [bold bright_yellow]Changed[/] {fmt_path(path)}")
+            log.report(f"  [bold bright_yellow]Changed[/] {fmt_path_link(path)}")
         for path in sorted(crawler.report.deleted_files):
             something_changed = True
             log.report(f"  [bold bright_magenta]Deleted[/] {fmt_path(path)}")
         for path in sorted(crawler.report.not_deleted_files):
             something_changed = True
-            log.report(f"  [bold bright_magenta]Not deleted[/] {fmt_path(path)}")
+            log.report_not_deleted(f"  [bold bright_magenta]Not deleted[/] {fmt_path_link(path)}")

         for warning in crawler.report.encountered_warnings:
             something_changed = True
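`fmt_path_link` turns each reported path into a clickable Rich hyperlink by converting the resolved local path into a `file://` URI. A small self-contained sketch of the same mechanism, using a hypothetical output directory and file name:

```python
from pathlib import Path

from rich.console import Console

# Hypothetical output directory and a file reported after a crawl
output_dir = Path(".")
relative_path = Path("README.md")

# Path.as_uri() requires an absolute path and percent-encodes spaces and special characters
link = (output_dir / relative_path).absolute().as_uri()

console = Console()
# Clickable in terminals that support hyperlinks; plain text elsewhere
console.print(f"[link={link}]{relative_path}[/link]")
```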
@@ -34,15 +34,6 @@ class MarkConflictError(Exception):
         self.collides_with = collides_with


-# TODO Use PurePath.is_relative_to when updating to 3.9
-def is_relative_to(a: PurePath, b: PurePath) -> bool:
-    try:
-        a.relative_to(b)
-        return True
-    except ValueError:
-        return False
-
-
 class Report:
     """
     A report of a synchronization. Includes all files found by the crawler, as
@@ -173,7 +164,7 @@ class Report:
         if path == other:
             raise MarkDuplicateError(path)

-        if is_relative_to(path, other) or is_relative_to(other, path):
+        if path.is_relative_to(other) or other.is_relative_to(path):
             raise MarkConflictError(path, other)

         self.known_files.add(path)
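With the minimum Python version raised to 3.11, the hand-rolled `is_relative_to` helper can be dropped in favour of `PurePath.is_relative_to`, which has been available since Python 3.9. A quick illustration of the check the conflict detection relies on:

```python
from pathlib import PurePath

a = PurePath("course/assignments/sheet01.pdf")
b = PurePath("course/assignments")

print(a.is_relative_to(b))  # True: a lies below b, so marking both would conflict
print(b.is_relative_to(a))  # False
```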
@@ -110,6 +110,10 @@ class ExactReTf(Transformation):
             except ValueError:
                 pass

+        named_groups: Dict[str, str] = match.groupdict()
+        for name, capture in named_groups.items():
+            locals_dir[name] = capture
+
         result = eval(f"f{right!r}", {}, locals_dir)
         return Transformed(PurePath(result))
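The transformer now also exposes named capture groups to the right-hand side of a regex transformation rule, in addition to the numbered ones. The sketch below shows the underlying mechanism in isolation; the pattern and template are made-up examples, not a real PFERD rule.

```python
import re

pattern = r"blatt(?P<num>\d+)\.pdf"
template = "sheets/sheet_{num}.pdf"

match = re.fullmatch(pattern, "blatt3.pdf")
assert match is not None

# Make every named capture group available as a local variable ...
locals_dir = {name: capture for name, capture in match.groupdict().items()}

# ... then evaluate the template as an f-string, the same trick ExactReTf uses above
result = eval(f"f{template!r}", {}, locals_dir)
print(result)  # sheets/sheet_3.pdf
```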
@@ -1,2 +1,2 @@
 NAME = "PFERD"
-VERSION = "3.4.3"
+VERSION = "3.8.0"
13
README.md
@@ -17,7 +17,7 @@ Binaries for Linux, Windows and Mac can be downloaded directly from the

 ### With pip

-Ensure you have at least Python 3.9 installed. Run the following command to
+Ensure you have at least Python 3.11 installed. Run the following command to
 install PFERD or upgrade it to the latest version:

 ```
@@ -56,6 +56,17 @@ Also, you can download most ILIAS pages directly like this:
 $ pferd kit-ilias-web <url> <output_directory>
 ```

+PFERD supports other ILIAS instances as well, using the `ilias-web` crawler (see
+the [config section on `ilias-web`](CONFIG.md#the-ilias-web-crawler) for more
+detail on the `base-url` and `client-id` parameters):
+
+```
+$ pferd ilias-web \
+    --base-url https://ilias.my-university.example \
+    --client-id My_University desktop \
+    <output_directory>
+```
+
 However, the CLI only lets you download a single thing at a time, and the
 resulting command can grow long quite quickly. Because of this, PFERD can also
 be used with a config file.
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
+{
+  "nodes": {
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1744440957,
+        "narHash": "sha256-FHlSkNqFmPxPJvy+6fNLaNeWnF1lZSgqVCl/eWaJRc4=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "26d499fc9f1d567283d5d56fcf367edd815dba1d",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-24.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "root": {
+      "inputs": {
+        "nixpkgs": "nixpkgs"
+      }
+    }
+  },
+  "root": "root",
+  "version": 7
+}
41
flake.nix
Normal file
@@ -0,0 +1,41 @@
+{
+  description = "Tool for downloading course-related files from ILIAS";
+
+  inputs = {
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
+  };
+
+  outputs = { self, nixpkgs }:
+    let
+      # Helper function to generate an attrset '{ x86_64-linux = f "x86_64-linux"; ... }'.
+      forAllSystems = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
+    in
+    {
+      packages = forAllSystems (system:
+        let pkgs = import nixpkgs { inherit system; };
+        in
+        rec {
+          default = pkgs.python3Packages.buildPythonApplication rec {
+            pname = "pferd";
+            # Performing black magic
+            # Don't worry, I sacrificed enough goats for the next few years
+            version = (pkgs.lib.importTOML ./PFERD/version.py).VERSION;
+            format = "pyproject";
+
+            src = ./.;
+
+            nativeBuildInputs = with pkgs.python3Packages; [
+              setuptools
+            ];
+
+            propagatedBuildInputs = with pkgs.python3Packages; [
+              aiohttp
+              beautifulsoup4
+              rich
+              keyring
+              certifi
+            ];
+          };
+        });
+    };
+}
11
mypy.ini
@@ -1,11 +0,0 @@
-[mypy]
-disallow_any_generics = True
-disallow_untyped_defs = True
-disallow_incomplete_defs = True
-no_implicit_optional = True
-warn_unused_ignores = True
-warn_unreachable = True
-show_error_context = True
-
-[mypy-rich.*,bs4,keyring]
-ignore_missing_imports = True
@@ -1,3 +1,42 @@
 [build-system]
 requires = ["setuptools", "wheel"]
 build-backend = "setuptools.build_meta"
+
+[project]
+name = "PFERD"
+dependencies = [
+    "aiohttp>=3.8.1",
+    "beautifulsoup4>=4.10.0",
+    "rich>=11.0.0",
+    "keyring>=23.5.0",
+    "certifi>=2021.10.8"
+]
+dynamic = ["version"]
+requires-python = ">=3.11"
+
+[project.scripts]
+pferd = "PFERD.__main__:main"
+
+[tool.setuptools.dynamic]
+version = {attr = "PFERD.version.VERSION"}
+
+[tool.flake8]
+max-line-length = 110
+
+[tool.isort]
+line_length = 110
+
+[tool.autopep8]
+max_line_length = 110
+in-place = true
+recursive = true
+
+[tool.mypy]
+disallow_any_generics = true
+disallow_untyped_defs = true
+disallow_incomplete_defs = true
+no_implicit_optional = true
+warn_unused_ignores = true
+warn_unreachable = true
+show_error_context = true
+ignore_missing_imports = true
@@ -1,8 +1,8 @@
 #!/usr/bin/env python3

 import argparse
-import time
 import re
+import time
 from subprocess import run

@@ -2,5 +2,5 @@

 set -e

-mypy PFERD
+mypy .
 flake8 PFERD
@@ -2,5 +2,5 @@

 set -e

-autopep8 --recursive --in-place PFERD
-isort PFERD
+autopep8 .
+isort .
@@ -13,5 +13,5 @@ pip install --upgrade setuptools
 pip install --editable .

 # Installing tools and type hints
-pip install --upgrade mypy flake8 autopep8 isort pyinstaller
+pip install --upgrade mypy flake8 flake8-pyproject autopep8 isort pyinstaller
 pip install --upgrade types-chardet types-certifi
23
setup.cfg
@@ -1,23 +0,0 @@
-[metadata]
-name = PFERD
-version = attr: PFERD.version.VERSION
-
-[options]
-packages = find:
-python_requires = >=3.9
-install_requires =
-    aiohttp>=3.8.1
-    beautifulsoup4>=4.10.0
-    rich>=11.0.0
-    keyring>=23.5.0
-    certifi>=2021.10.8
-
-[options.entry_points]
-console_scripts =
-    pferd = PFERD.__main__:main
-
-[flake8]
-max_line_length = 110
-
-[isort]
-line_length = 110