Compare commits


4 Commits

Author SHA1 Message Date
29251fa003 Merge 77c1f1516c into e41a22149e 2023-08-26 17:16:00 +02:00
e41a22149e Add default show-not-deleted option 2023-08-26 17:13:45 +02:00
    If set to `no`, PFERD won't print status or report messages for not deleted files
77c1f1516c Used proper plural 2021-11-02 12:41:40 +01:00
9e12e96d90 Added alias functionality 2021-11-02 03:42:08 +01:00
31 changed files with 1242 additions and 1846 deletions


@@ -1,10 +0,0 @@
-version: 2
-updates:
-- package-ecosystem: github-actions
-  directory: /
-  schedule:
-    interval: monthly
-  groups:
-    gh-actions:
-      patterns:
-      - "*"


@@ -1,6 +1,6 @@
 name: build-and-release
 
-on: [push, pull_request]
+on: push
 
 defaults:
   run:
@@ -13,12 +13,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-13, macos-latest]
+        os: [ubuntu-latest, windows-latest, macos-latest]
         python: ["3.9"]
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python }}
@@ -33,12 +34,7 @@ jobs:
         run: ./scripts/setup --no-pip
       - name: Run checks
-        run: |
-          ./scripts/check
-          ./scripts/format
-      - name: Assert no changes
-        run: git diff --exit-code
+        run: ./scripts/check
       - name: Build
         run: ./scripts/build
@@ -49,9 +45,9 @@ jobs:
         run: mv dist/pferd* dist/pferd-${{ matrix.os }}
       - name: Upload binary
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v3
         with:
-          name: pferd-${{ matrix.os }}
+          name: Binaries
           path: dist/pferd-${{ matrix.os }}
 
   release:
@@ -61,20 +57,18 @@ jobs:
     steps:
       - name: Download binaries
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v3
         with:
-          pattern: pferd-*
-          merge-multiple: true
+          name: Binaries
       - name: Rename binaries
         run: |
           mv pferd-ubuntu-latest pferd-linux
           mv pferd-windows-latest pferd-windows.exe
-          mv pferd-macos-13 pferd-mac-x86_64
           mv pferd-macos-latest pferd-mac
      - name: Create release
-        uses: softprops/action-gh-release@v2
+        uses: softprops/action-gh-release@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
@@ -82,4 +76,3 @@ jobs:
             pferd-linux
             pferd-windows.exe
             pferd-mac
-            pferd-mac-x86_64

.gitignore (vendored)

@@ -3,7 +3,6 @@
 /PFERD.egg-info/
 __pycache__/
 /.vscode/
-/.idea/
 
 # pyinstaller
 /pferd.spec


@@ -22,55 +22,18 @@ ambiguous situations.
 ## Unreleased
 
-## 3.6.0 - 2024-10-23
-### Added
-- Generic `ilias-web` crawler and `ilias-web` CLI command
-- Support for the course overview page. Using this URL as a target might cause
-  duplication warnings, as subgroups are listed separately.
-- Support for named capture groups in regex transforms
-- Crawl custom item groups as folders
-
-### Fixed
-- Normalization of meeting names in cards
-- Sanitization of slashes in exercise container names
-
-## 3.5.2 - 2024-04-14
-### Fixed
-- Crawling of personal desktop with ILIAS 8
-- Crawling of empty personal desktops
-
-## 3.5.1 - 2024-04-09
-### Added
-- Support for ILIAS 8
-
-### Fixed
-- Video name deduplication
-
-## 3.5.0 - 2023-09-13
-### Added
-- `no-delete-prompt-override` conflict resolution strategy
-- Support for ILIAS learning modules
-- `show_not_deleted` option to stop printing the "Not Deleted" status or report
-  message. This combines nicely with the `no-delete-prompt-override` strategy,
-  causing PFERD to mostly ignore local-only files.
-- Support for mediacast video listings
-- Crawling of files in info tab
-
-### Changed
-- Remove size suffix for files in content pages
-
 ### Fixed
 - Crawling of courses with the timeline view as the default tab
 - Crawling of file and custom opencast cards
 - Crawling of button cards without descriptions
 - Abort crawling when encountering an unexpected ilias root page redirect
-- Sanitize ascii control characters on Windows
-- Crawling of paginated past meetings
-- Ignore SCORM learning modules
+
+### Added
+- `no-delete-prompt-override` conflict resolution strategy
+- support for ILIAS learning modules
+- `show_not_deleted` option to stop printing the "Not Deleted" status or report
+  message. This combines nicely with the `no-delete-prompt-override` strategy,
+  causing PFERD to mostly ignore local-only files.
 
 ## 3.4.3 - 2022-11-29


@@ -4,11 +4,11 @@ A config file consists of sections. A section begins with a `[section]` header,
 which is followed by a list of `key = value` pairs. Comments must be on their
 own line and start with `#`. Multiline values must be indented beyond their key.
 Boolean values can be `yes` or `no`. For more details and some examples on the
-format, see the [configparser documentation][cp-file]
-([interpolation][cp-interp] is disabled).
+format, see the [configparser documentation][1] ([interpolation][2] is
+disabled).
 
-[cp-file]: <https://docs.python.org/3/library/configparser.html#supported-ini-file-structure> "Supported INI File Structure"
-[cp-interp]: <https://docs.python.org/3/library/configparser.html#interpolation-of-values> "Interpolation of values"
+[1]: <https://docs.python.org/3/library/configparser.html#supported-ini-file-structure> "Supported INI File Structure"
+[2]: <https://docs.python.org/3/library/configparser.html#interpolation-of-values> "Interpolation of values"
 
 ## The `DEFAULT` section
@@ -92,6 +92,9 @@ common to all crawlers:
   load for the crawl target. (Default: `0.0`)
 - `windows_paths`: Whether PFERD should find alternative names for paths that
   are invalid on Windows. (Default: `yes` on Windows, `no` otherwise)
+- `aliases`: List of strings that are considered as an alias when invoking with
+  the `--crawler` or `-C` flag. If there is more than one crawl section with
+  the same aliases all are selected. Thereby, you can group different crawlers.
 
 Some crawlers may also require credentials for authentication. To configure how
 the crawler obtains its credentials, the `auth` option is used. It is set to the
@@ -106,6 +109,7 @@ username = foo
 password = bar
 
 [crawl:something]
+aliases = [sth, some]
 type = some-complex-crawler
 auth = auth:example
 on_conflict = no-delete
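The `aliases` behaviour added above is essentially a name lookup across crawl sections. A minimal sketch of that resolution, assuming `configparser` sections; `select_crawl_sections` is a hypothetical helper, not PFERD's actual implementation:

```python
import configparser
from typing import List


def select_crawl_sections(parser: configparser.ConfigParser, selector: str) -> List[str]:
    """Return every crawl section whose name or alias list matches the -C selector."""
    matches: List[str] = []
    for name in parser.sections():
        if not name.startswith("crawl:"):
            continue
        if name == f"crawl:{selector}":
            matches.append(name)
            continue
        # "aliases = [sth, some]" is read here as a bracketed, comma-separated list
        raw = parser.get(name, "aliases", fallback="").strip("[]")
        if selector in (alias.strip() for alias in raw.split(",")):
            matches.append(name)
    return matches
```

Because every section carrying the same alias matches, an invocation like `pferd -C some` selects all of them at once, which is what allows grouping different crawlers.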
@@ -146,7 +150,7 @@ crawler simulate a slower, network-based crawler.
 This crawler crawls a KIT-IPD page by url. The root page can be crawled from
 outside the KIT network so you will be informed about any new/deleted files,
-but downloading files requires you to be within. Adding a short delay between
+but downloading files requires you to be within. Adding a show delay between
 requests is likely a good idea.
 
 - `target`: URL to a KIT-IPD page
@@ -154,56 +158,6 @@ requests is likely a good idea.
   matches, the given link is downloaded as a file. This is used to extract
   files from KIT-IPD pages. (Default: `^.*?[^/]+\.(pdf|zip|c|cpp|java)$`)
 
-### The `ilias-web` crawler
-
-This crawler crawls a generic ILIAS instance.
-
-Inspired by [this ILIAS downloader][ilias-dl], the following configurations should work
-out of the box for the corresponding universities:
-
-[ilias-dl]: https://github.com/V3lop5/ilias-downloader/blob/main/configs "ilias-downloader configs"
-
-| University    | `base_url`                           | `client_id`   |
-|---------------|--------------------------------------|---------------|
-| FH Aachen     | https://www.ili.fh-aachen.de         | elearning     |
-| Uni Köln      | https://www.ilias.uni-koeln.de/ilias | uk            |
-| Uni Konstanz  | https://ilias.uni-konstanz.de        | ILIASKONSTANZ |
-| Uni Stuttgart | https://ilias3.uni-stuttgart.de      | Uni_Stuttgart |
-
-If your university isn't listed, try navigating to your instance's login page.
-Assuming no custom login service is used, the URL will look something like this:
-
-```jinja
-{{ base_url }}/login.php?client_id={{ client_id }}&cmd=force_login&lang=
-```
-
-If the values work, feel free to submit a PR and add them to the table above.
-
-- `base_url`: The URL where the ILIAS instance is located. (Required)
-- `client_id`: An ID used for authentication. (Required)
-- `target`: The ILIAS element to crawl. (Required)
-  - `desktop`: Crawl your personal desktop / dashboard
-  - `<course id>`: Crawl the course with the given id
-  - `<url>`: Crawl a given element by URL (preferably the permanent URL linked
-    at the bottom of its ILIAS page).
-    This also supports the "My Courses" overview page to download *all*
-    courses. Note that this might produce confusing local directory layouts
-    and duplication warnings if you are a member of an ILIAS group. The
-    `desktop` target is generally preferable.
-- `auth`: Name of auth section to use for login. (Required)
-- `links`: How to represent external links. (Default: `fancy`)
-  - `ignore`: Don't download links.
-  - `plaintext`: A text file containing only the URL.
-  - `fancy`: A HTML file looking like the ILIAS link element.
-  - `internet-shortcut`: An internet shortcut file (`.url` file).
-- `link_redirect_delay`: Time (in seconds) until `fancy` link files will
-  redirect to the actual URL. Set to a negative value to disable the automatic
-  redirect. (Default: `-1`)
-- `videos`: Whether to download videos. (Default: `no`)
-- `forums`: Whether to download forum threads. (Default: `no`)
-- `http_timeout`: The timeout (in seconds) for all HTTP requests. (Default:
-  `20.0`)
-
 ### The `kit-ilias-web` crawler
 
 This crawler crawls the KIT ILIAS instance.
@@ -282,10 +236,10 @@ is stored in the keyring.
 ### The `pass` authenticator
 
-This authenticator queries the [`pass` password manager][pass] for a username
-and password. It tries to be mostly compatible with [browserpass][browserpass]
-and [passff][passff], so see those links for an overview of the format. If PFERD
-fails to load your password, you can use the `--explain` flag to see why.
+This authenticator queries the [`pass` password manager][3] for a username and
+password. It tries to be mostly compatible with [browserpass][4] and
+[passff][5], so see those links for an overview of the format. If PFERD fails
+to load your password, you can use the `--explain` flag to see why.
 
 - `passname`: The name of the password to use (Required)
 - `username_prefixes`: A comma-separated list of username line prefixes
@@ -293,9 +247,9 @@ fails to load your password, you can use the `--explain` flag to see why.
 - `password_prefixes`: A comma-separated list of password line prefixes
   (Default: `password,pass,secret`)
 
-[pass]: <https://www.passwordstore.org/> "Pass: The Standard Unix Password Manager"
-[browserpass]: <https://github.com/browserpass/browserpass-extension#organizing-password-store> "Organizing password store"
-[passff]: <https://github.com/passff/passff#multi-line-format> "Multi-line format"
+[3]: <https://www.passwordstore.org/> "Pass: The Standard Unix Password Manager"
+[4]: <https://github.com/browserpass/browserpass-extension#organizing-password-store> "Organizing password store"
+[5]: <https://github.com/passff/passff#multi-line-format> "Multi-line format"
 
 ### The `tfa` authenticator
@@ -394,8 +348,7 @@ matches `SOURCE`, the output path is created using `TARGET` as template.
   be referred to as `{g<n>}` (e.g. `{g3}`). `{g0}` refers to the original path.
   If capturing group *n*'s contents are a valid integer, the integer value is
   available as `{i<n>}` (e.g. `{i3}`). If capturing group *n*'s contents are a
-  valid float, the float value is available as `{f<n>}` (e.g. `{f3}`). Named capture
-  groups (e.g. `(?P<name>)`) are available by their name (e.g. `{name}`). If a
+  valid float, the float value is available as `{f<n>}` (e.g. `{f3}`). If a
   capturing group is not present (e.g. when matching the string `cd` with the
   regex `(ab)?cd`), the corresponding variables are not defined.
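To make the template-variable rules above concrete, here is a sketch of how `{g<n>}`, `{i<n>}` and `{f<n>}` (plus the named-group lookup documented on the removed side of this hunk) can be derived from a match. `template_variables` is a hypothetical helper mirroring the documented behaviour, not PFERD's actual code:

```python
import re
from typing import Dict


def template_variables(pattern: str, path: str) -> Dict[str, str]:
    """Build the {g<n>}/{i<n>}/{f<n>} substitution variables for one SOURCE match."""
    match = re.fullmatch(pattern, path)
    if match is None:
        return {}
    variables: Dict[str, str] = {"g0": match.group(0)}
    for n, value in enumerate(match.groups(), start=1):
        if value is None:
            continue  # absent optional groups define no variables at all
        variables[f"g{n}"] = value
        try:
            variables[f"i{n}"] = str(int(value))
        except ValueError:
            pass  # not a valid integer, so {i<n>} stays undefined
        try:
            variables[f"f{n}"] = str(float(value))
        except ValueError:
            pass  # not a valid float, so {f<n>} stays undefined
    for name, value in match.groupdict().items():
        if value is not None:
            variables[name] = value  # named groups are reachable by their own name
    return variables
```

For example, matching `Blatt (\d+)\.pdf` against `Blatt 03.pdf` yields `{g1}` as `03` and `{i1}` as `3`, so a `TARGET` of `Blatt {i1}.pdf` renames the file to `Blatt 3.pdf`.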


@@ -1,6 +1,6 @@
-Copyright 2019-2024 Garmelon, I-Al-Istannen, danstooamerican, pavelzw,
+Copyright 2019-2021 Garmelon, I-Al-Istannen, danstooamerican, pavelzw,
 TheChristophe, Scriptim, thelukasprobst, Toorero,
-Mr-Pine, p-fruck
+Mr-Pine
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal in


@@ -8,7 +8,6 @@
 # well.
 
 from . import command_local  # noqa: F401 imported but unused
-from . import command_ilias_web  # noqa: F401 imported but unused
 from . import command_kit_ilias_web  # noqa: F401 imported but unused
 from . import command_kit_ipd  # noqa: F401 imported but unused
 from .parser import PARSER, ParserLoadError, load_default_section  # noqa: F401 imported but unused


@@ -1,56 +0,0 @@
-import argparse
-import configparser
-
-from ..logging import log
-from .common_ilias_args import configure_common_group_args, load_common
-from .parser import CRAWLER_PARSER, SUBPARSERS, load_crawler
-
-COMMAND_NAME = "ilias-web"
-
-SUBPARSER = SUBPARSERS.add_parser(
-    COMMAND_NAME,
-    parents=[CRAWLER_PARSER],
-)
-
-GROUP = SUBPARSER.add_argument_group(
-    title=f"{COMMAND_NAME} crawler arguments",
-    description=f"arguments for the '{COMMAND_NAME}' crawler",
-)
-
-GROUP.add_argument(
-    "--base-url",
-    type=str,
-    metavar="BASE_URL",
-    help="The base url of the ilias instance"
-)
-
-GROUP.add_argument(
-    "--client-id",
-    type=str,
-    metavar="CLIENT_ID",
-    help="The client id of the ilias instance"
-)
-
-configure_common_group_args(GROUP)
-
-
-def load(
-    args: argparse.Namespace,
-    parser: configparser.ConfigParser,
-) -> None:
-    log.explain(f"Creating config for command '{COMMAND_NAME}'")
-
-    parser["crawl:ilias"] = {}
-    section = parser["crawl:ilias"]
-    load_crawler(args, section)
-
-    section["type"] = COMMAND_NAME
-    if args.ilias_url is not None:
-        section["base_url"] = args.ilias_url
-    if args.client_id is not None:
-        section["client_id"] = args.client_id
-
-    load_common(section, args, parser)
-
-
-SUBPARSER.set_defaults(command=load)


@@ -1,37 +1,120 @@
 import argparse
 import configparser
+from pathlib import Path
 
+from ..crawl.ilias.file_templates import Links
 from ..logging import log
-from .common_ilias_args import configure_common_group_args, load_common
-from .parser import CRAWLER_PARSER, SUBPARSERS, load_crawler
-
-COMMAND_NAME = "kit-ilias-web"
+from .parser import (CRAWLER_PARSER, SUBPARSERS, BooleanOptionalAction, ParserLoadError, load_crawler,
+                     show_value_error)
 
 SUBPARSER = SUBPARSERS.add_parser(
-    COMMAND_NAME,
+    "kit-ilias-web",
     parents=[CRAWLER_PARSER],
 )
 
 GROUP = SUBPARSER.add_argument_group(
-    title=f"{COMMAND_NAME} crawler arguments",
-    description=f"arguments for the '{COMMAND_NAME}' crawler",
+    title="kit-ilias-web crawler arguments",
+    description="arguments for the 'kit-ilias-web' crawler",
 )
+GROUP.add_argument(
+    "target",
+    type=str,
+    metavar="TARGET",
+    help="course id, 'desktop', or ILIAS URL to crawl"
+)
+GROUP.add_argument(
+    "output",
+    type=Path,
+    metavar="OUTPUT",
+    help="output directory"
+)
+GROUP.add_argument(
+    "--username", "-u",
+    type=str,
+    metavar="USERNAME",
+    help="user name for authentication"
+)
+GROUP.add_argument(
+    "--keyring",
+    action=BooleanOptionalAction,
+    help="use the system keyring to store and retrieve passwords"
+)
+GROUP.add_argument(
+    "--credential-file",
+    type=Path,
+    metavar="PATH",
+    help="read username and password from a credential file"
+)
+GROUP.add_argument(
+    "--links",
+    type=show_value_error(Links.from_string),
+    metavar="OPTION",
+    help="how to represent external links"
+)
+GROUP.add_argument(
+    "--link-redirect-delay",
+    type=int,
+    metavar="SECONDS",
+    help="time before 'fancy' links redirect to to their target (-1 to disable)"
+)
+GROUP.add_argument(
+    "--videos",
+    action=BooleanOptionalAction,
+    help="crawl and download videos"
+)
+GROUP.add_argument(
+    "--forums",
+    action=BooleanOptionalAction,
+    help="crawl and download forum posts"
+)
+GROUP.add_argument(
+    "--http-timeout", "-t",
+    type=float,
+    metavar="SECONDS",
+    help="timeout for all HTTP requests"
+)
 
-configure_common_group_args(GROUP)
-
 
 def load(
     args: argparse.Namespace,
     parser: configparser.ConfigParser,
 ) -> None:
-    log.explain(f"Creating config for command '{COMMAND_NAME}'")
+    log.explain("Creating config for command 'kit-ilias-web'")
 
     parser["crawl:ilias"] = {}
     section = parser["crawl:ilias"]
     load_crawler(args, section)
 
-    section["type"] = COMMAND_NAME
-    load_common(section, args, parser)
+    section["type"] = "kit-ilias-web"
+    section["target"] = str(args.target)
+    section["output_dir"] = str(args.output)
+    section["auth"] = "auth:ilias"
+    if args.links is not None:
+        section["links"] = str(args.links.value)
+    if args.link_redirect_delay is not None:
+        section["link_redirect_delay"] = str(args.link_redirect_delay)
+    if args.videos is not None:
+        section["videos"] = "yes" if args.videos else "no"
+    if args.forums is not None:
+        section["forums"] = "yes" if args.forums else "no"
+    if args.http_timeout is not None:
+        section["http_timeout"] = str(args.http_timeout)
+
+    parser["auth:ilias"] = {}
+    auth_section = parser["auth:ilias"]
+    if args.credential_file is not None:
+        if args.username is not None:
+            raise ParserLoadError("--credential-file and --username can't be used together")
+        if args.keyring:
+            raise ParserLoadError("--credential-file and --keyring can't be used together")
+        auth_section["type"] = "credential-file"
+        auth_section["path"] = str(args.credential_file)
+    elif args.keyring:
+        auth_section["type"] = "keyring"
+    else:
+        auth_section["type"] = "simple"
+    if args.username is not None:
+        auth_section["username"] = args.username
 
 
 SUBPARSER.set_defaults(command=load)
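For orientation, the expanded `load` above synthesizes roughly the following configuration for an invocation like `pferd kit-ilias-web 1234567 ./folder --videos` (hypothetical target and output values; no keyring or credential file given):

```python
import configparser

# Sections as load() would fill them in for the example invocation above
parser = configparser.ConfigParser()
parser["crawl:ilias"] = {
    "type": "kit-ilias-web",
    "target": "1234567",
    "output_dir": "folder",
    "auth": "auth:ilias",
    "videos": "yes",
}
parser["auth:ilias"] = {
    "type": "simple",  # fallback branch when neither --keyring nor --credential-file is set
}
```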


@@ -1,104 +0,0 @@
-import argparse
-import configparser
-from pathlib import Path
-
-from ..crawl.ilias.file_templates import Links
-from .parser import BooleanOptionalAction, ParserLoadError, show_value_error
-
-
-def configure_common_group_args(group: argparse._ArgumentGroup) -> None:
-    """These arguments are shared between the KIT and generic Ilias web command."""
-    group.add_argument(
-        "target",
-        type=str,
-        metavar="TARGET",
-        help="course id, 'desktop', or ILIAS URL to crawl"
-    )
-    group.add_argument(
-        "output",
-        type=Path,
-        metavar="OUTPUT",
-        help="output directory"
-    )
-    group.add_argument(
-        "--username", "-u",
-        type=str,
-        metavar="USERNAME",
-        help="user name for authentication"
-    )
-    group.add_argument(
-        "--keyring",
-        action=BooleanOptionalAction,
-        help="use the system keyring to store and retrieve passwords"
-    )
-    group.add_argument(
-        "--credential-file",
-        type=Path,
-        metavar="PATH",
-        help="read username and password from a credential file"
-    )
-    group.add_argument(
-        "--links",
-        type=show_value_error(Links.from_string),
-        metavar="OPTION",
-        help="how to represent external links"
-    )
-    group.add_argument(
-        "--link-redirect-delay",
-        type=int,
-        metavar="SECONDS",
-        help="time before 'fancy' links redirect to to their target (-1 to disable)"
-    )
-    group.add_argument(
-        "--videos",
-        action=BooleanOptionalAction,
-        help="crawl and download videos"
-    )
-    group.add_argument(
-        "--forums",
-        action=BooleanOptionalAction,
-        help="crawl and download forum posts"
-    )
-    group.add_argument(
-        "--http-timeout", "-t",
-        type=float,
-        metavar="SECONDS",
-        help="timeout for all HTTP requests"
-    )
-
-
-def load_common(
-    section: configparser.SectionProxy,
-    args: argparse.Namespace,
-    parser: configparser.ConfigParser,
-) -> None:
-    """Load common config between generic and KIT ilias web command"""
-    section["target"] = str(args.target)
-    section["output_dir"] = str(args.output)
-    section["auth"] = "auth:ilias"
-    if args.links is not None:
-        section["links"] = str(args.links.value)
-    if args.link_redirect_delay is not None:
-        section["link_redirect_delay"] = str(args.link_redirect_delay)
-    if args.videos is not None:
-        section["videos"] = "yes" if args.videos else "no"
-    if args.forums is not None:
-        section["forums"] = "yes" if args.forums else "no"
-    if args.http_timeout is not None:
-        section["http_timeout"] = str(args.http_timeout)
-
-    parser["auth:ilias"] = {}
-    auth_section = parser["auth:ilias"]
-    if args.credential_file is not None:
-        if args.username is not None:
-            raise ParserLoadError("--credential-file and --username can't be used together")
-        if args.keyring:
-            raise ParserLoadError("--credential-file and --keyring can't be used together")
-        auth_section["type"] = "credential-file"
-        auth_section["path"] = str(args.credential_file)
-    elif args.keyring:
-        auth_section["type"] = "keyring"
-    else:
-        auth_section["type"] = "simple"
-    if args.username is not None:
-        auth_section["username"] = args.username


@@ -241,5 +241,4 @@ def load_default_section(
     if args.show_not_deleted is not None:
         section["show_not_deleted"] = "yes" if args.show_not_deleted else "no"
 
-
 SUBPARSERS = PARSER.add_subparsers(title="crawlers")


@@ -4,7 +4,7 @@ from typing import Callable, Dict
 from ..auth import Authenticator
 from ..config import Config
 from .crawler import Crawler, CrawlError, CrawlerSection  # noqa: F401
-from .ilias import IliasWebCrawler, IliasWebCrawlerSection, KitIliasWebCrawler, KitIliasWebCrawlerSection
+from .ilias import KitIliasWebCrawler, KitIliasWebCrawlerSection
 from .kit_ipd_crawler import KitIpdCrawler, KitIpdCrawlerSection
 from .local_crawler import LocalCrawler, LocalCrawlerSection
@@ -18,8 +18,6 @@ CrawlerConstructor = Callable[[
 CRAWLERS: Dict[str, CrawlerConstructor] = {
     "local": lambda n, s, c, a:
         LocalCrawler(n, LocalCrawlerSection(s), c),
-    "ilias-web": lambda n, s, c, a:
-        IliasWebCrawler(n, IliasWebCrawlerSection(s), c, a),
     "kit-ilias-web": lambda n, s, c, a:
         KitIliasWebCrawler(n, KitIliasWebCrawlerSection(s), c, a),
     "kit-ipd": lambda n, s, c, a:


@@ -1,9 +1,3 @@
-from .kit_ilias_web_crawler import (IliasWebCrawler, IliasWebCrawlerSection, KitIliasWebCrawler,
-                                    KitIliasWebCrawlerSection)
+from .kit_ilias_web_crawler import KitIliasWebCrawler, KitIliasWebCrawlerSection
 
-__all__ = [
-    "IliasWebCrawler",
-    "IliasWebCrawlerSection",
-    "KitIliasWebCrawler",
-    "KitIliasWebCrawlerSection",
-]
+__all__ = ["KitIliasWebCrawler", "KitIliasWebCrawlerSection"]


@@ -1,39 +0,0 @@
-import asyncio
-from typing import Any, Callable, Optional
-
-import aiohttp
-
-from ...logging import log
-from ..crawler import AWrapped, CrawlError, CrawlWarning
-
-
-def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Callable[[AWrapped], AWrapped]:
-    def decorator(f: AWrapped) -> AWrapped:
-        async def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
-            last_exception: Optional[BaseException] = None
-            for round in range(attempts):
-                try:
-                    return await f(*args, **kwargs)
-                except aiohttp.ContentTypeError:  # invalid content type
-                    raise CrawlWarning("ILIAS returned an invalid content type")
-                except aiohttp.TooManyRedirects:
-                    raise CrawlWarning("Got stuck in a redirect loop")
-                except aiohttp.ClientPayloadError as e:  # encoding or not enough bytes
-                    last_exception = e
-                except aiohttp.ClientConnectionError as e:  # e.g. timeout, disconnect, resolve failed, etc.
-                    last_exception = e
-                except asyncio.exceptions.TimeoutError as e:  # explicit http timeouts in HttpCrawler
-                    last_exception = e
-                log.explain_topic(f"Retrying operation {name}. Retries left: {attempts - 1 - round}")
-
-            if last_exception:
-                message = f"Error in I/O Operation: {last_exception}"
-                if failure_is_error:
-                    raise CrawlError(message) from last_exception
-                else:
-                    raise CrawlWarning(message) from last_exception
-            raise CrawlError("Impossible return in ilias _iorepeat")
-
-        return wrapper  # type: ignore
-    return decorator
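A hypothetical call site for the deleted `_iorepeat` decorator above, to show the intended contract: the wrapped coroutine is retried up to `attempts` times, and with `failure_is_error=True` the final failure surfaces as a `CrawlError` instead of a `CrawlWarning`:

```python
import aiohttp

# Assumes the imports and definitions of the deleted module above are in scope.
@_iorepeat(attempts=3, name="downloading file", failure_is_error=True)
async def download_file(session: aiohttp.ClientSession, url: str) -> bytes:
    async with session.get(url) as response:
        return await response.read()
```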

File diff suppressed because it is too large


@@ -3,7 +3,7 @@ import re
 from dataclasses import dataclass
 from datetime import date, datetime, timedelta
 from enum import Enum
-from typing import Dict, List, Optional, Union, cast
+from typing import Dict, List, Optional, Union
 from urllib.parse import urljoin, urlparse
 
 from bs4 import BeautifulSoup, Tag
@@ -17,23 +17,19 @@ TargetType = Union[str, int]
 class IliasElementType(Enum):
     EXERCISE = "exercise"
     EXERCISE_FILES = "exercise_files"  # own submitted files
     TEST = "test"  # an online test. Will be ignored currently.
     FILE = "file"
     FOLDER = "folder"
     FORUM = "forum"
     LINK = "link"
-    INFO_TAB = "info_tab"
     LEARNING_MODULE = "learning_module"
     BOOKING = "booking"
     MEETING = "meeting"
     SURVEY = "survey"
-    SCORM_LEARNING_MODULE = "scorm_learning_module"
-    MEDIACAST_VIDEO_FOLDER = "mediacast_video_folder"
-    MEDIACAST_VIDEO = "mediacast_video"
-    OPENCAST_VIDEO = "opencast_video"
-    OPENCAST_VIDEO_PLAYER = "opencast_video_player"
-    OPENCAST_VIDEO_FOLDER = "opencast_video_folder"
-    OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED = "opencast_video_folder_maybe_paginated"
+    VIDEO = "video"
+    VIDEO_PLAYER = "video_player"
+    VIDEO_FOLDER = "video_folder"
+    VIDEO_FOLDER_MAYBE_PAGINATED = "video_folder_maybe_paginated"
@dataclass @dataclass
@ -48,13 +44,8 @@ class IliasPageElement:
regexes = [ regexes = [
r"eid=(?P<id>[0-9a-z\-]+)", r"eid=(?P<id>[0-9a-z\-]+)",
r"file_(?P<id>\d+)", r"file_(?P<id>\d+)",
r"copa_(?P<id>\d+)",
r"fold_(?P<id>\d+)",
r"frm_(?P<id>\d+)",
r"exc_(?P<id>\d+)",
r"ref_id=(?P<id>\d+)", r"ref_id=(?P<id>\d+)",
r"target=[a-z]+_(?P<id>\d+)", r"target=[a-z]+_(?P<id>\d+)"
r"mm_(?P<id>\d+)"
] ]
for regex in regexes: for regex in regexes:
@@ -65,52 +56,6 @@ class IliasPageElement:
         log.warn(f"Didn't find identity for {self.name} - {self.url}. Please report this.")
         return self.url
 
-    @staticmethod
-    def create_new(
-            typ: IliasElementType,
-            url: str,
-            name: str,
-            mtime: Optional[datetime] = None,
-            description: Optional[str] = None,
-            skip_sanitize: bool = False
-    ) -> 'IliasPageElement':
-        if typ == IliasElementType.MEETING:
-            normalized = IliasPageElement._normalize_meeting_name(name)
-            log.explain(f"Normalized meeting name from {name!r} to {normalized!r}")
-            name = normalized
-
-        if not skip_sanitize:
-            name = _sanitize_path_name(name)
-
-        return IliasPageElement(typ, url, name, mtime, description)
-
-    @staticmethod
-    def _normalize_meeting_name(meeting_name: str) -> str:
-        """
-        Normalizes meeting names, which have a relative time as their first part,
-        to their date in ISO format.
-        """
-
-        # This checks whether we can reach a `:` without passing a `-`
-        if re.search(r"^[^-]+: ", meeting_name):
-            # Meeting name only contains date: "05. Jan 2000:"
-            split_delimiter = ":"
-        else:
-            # Meeting name contains date and start/end times: "05. Jan 2000, 16:00 - 17:30:"
-            split_delimiter = ", "
-
-        # We have a meeting day without time
-        date_portion_str = meeting_name.split(split_delimiter)[0]
-        date_portion = demangle_date(date_portion_str)
-
-        # We failed to parse the date, bail out
-        if not date_portion:
-            return meeting_name
-
-        # Replace the first section with the absolute date
-        rest_of_name = split_delimiter.join(meeting_name.split(split_delimiter)[1:])
-        return datetime.strftime(date_portion, "%Y-%m-%d") + split_delimiter + rest_of_name
-
 
 @dataclass
 class IliasDownloadForumData:
@@ -145,9 +90,13 @@ class IliasPage:
     @staticmethod
     def is_root_page(soup: BeautifulSoup) -> bool:
-        if permalink := IliasPage.get_soup_permalink(soup):
-            return "goto.php?target=root_" in permalink
-        return False
+        permalink = soup.find(id="current_perma_link")
+        if permalink is None:
+            return False
+        value = permalink.attrs.get("value")
+        if value is None:
+            return False
+        return "goto.php?target=root_" in value
 
     def get_child_elements(self) -> List[IliasPageElement]:
         """
@@ -156,9 +105,9 @@ class IliasPage:
         if self._is_video_player():
             log.explain("Page is a video player, extracting URL")
             return self._player_to_video()
-        if self._is_opencast_video_listing():
-            log.explain("Page is an opencast video listing, searching for elements")
-            return self._find_opencast_video_entries()
+        if self._is_video_listing():
+            log.explain("Page is a video listing, searching for elements")
+            return self._find_video_entries()
         if self._is_exercise_file():
             log.explain("Page is an exercise, searching for elements")
             return self._find_exercise_entries()
@@ -168,25 +117,9 @@ class IliasPage:
         if self._is_content_page():
             log.explain("Page is a content page, searching for elements")
             return self._find_copa_entries()
-        if self._is_info_tab():
-            log.explain("Page is info tab, searching for elements")
-            return self._find_info_tab_entries()
         log.explain("Page is a normal folder, searching for elements")
         return self._find_normal_entries()
 
-    def get_info_tab(self) -> Optional[IliasPageElement]:
-        tab: Optional[Tag] = self._soup.find(
-            name="a",
-            attrs={"href": lambda x: x and "cmdClass=ilinfoscreengui" in x}
-        )
-        if tab is not None:
-            return IliasPageElement.create_new(
-                IliasElementType.INFO_TAB,
-                self._abs_url_from_link(tab),
-                "infos"
-            )
-        return None
-
     def get_description(self) -> Optional[BeautifulSoup]:
         def is_interesting_class(name: str) -> bool:
             return name in ["ilCOPageSection", "ilc_Paragraph", "ilc_va_ihcap_VAccordIHeadCap"]
@@ -266,18 +199,14 @@ class IliasPage:
         if self._is_ilias_opencast_embedding():
             log.explain("Unwrapping opencast embedding")
             return self.get_child_elements()[0]
-        if self._page_type == IliasElementType.OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED:
+        if self._page_type == IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED:
             log.explain("Unwrapping video pagination")
-            return self._find_opencast_video_entries_paginated()[0]
+            return self._find_video_entries_paginated()[0]
         if self._contains_collapsed_future_meetings():
             log.explain("Requesting *all* future meetings")
             return self._uncollapse_future_meetings_url()
         if not self._is_content_tab_selected():
-            if self._page_type != IliasElementType.INFO_TAB:
-                log.explain("Selecting content tab")
-                return self._select_content_page_url()
-            else:
-                log.explain("Crawling info tab, skipping content select")
+            return self._select_content_page_url()
         return None
 
     def _is_forum_page(self) -> bool:
@@ -290,7 +219,7 @@ class IliasPage:
     def _is_video_player(self) -> bool:
         return "paella_config_file" in str(self._soup)
 
-    def _is_opencast_video_listing(self) -> bool:
+    def _is_video_listing(self) -> bool:
         if self._is_ilias_opencast_embedding():
             return True
@@ -325,38 +254,30 @@ class IliasPage:
         return self._soup.find("a", attrs={"href": lambda x: x and "block_type=pditems" in x})
 
     def _is_content_page(self) -> bool:
-        if link := self.get_permalink():
-            return "target=copa_" in link
-        return False
+        link = self._soup.find(id="current_perma_link")
+        if not link:
+            return False
+        return "target=copa_" in link.get("value")
 
     def _is_learning_module_page(self) -> bool:
-        if link := self.get_permalink():
-            return "target=pg_" in link
-        return False
+        link = self._soup.find(id="current_perma_link")
+        if not link:
+            return False
+        return "target=pg_" in link.get("value")
 
     def _contains_collapsed_future_meetings(self) -> bool:
         return self._uncollapse_future_meetings_url() is not None
 
     def _uncollapse_future_meetings_url(self) -> Optional[IliasPageElement]:
-        element = self._soup.find(
-            "a",
-            attrs={"href": lambda x: x and ("crs_next_sess=1" in x or "crs_prev_sess=1" in x)}
-        )
+        element = self._soup.find("a", attrs={"href": lambda x: x and "crs_next_sess=1" in x})
         if not element:
             return None
         link = self._abs_url_from_link(element)
-        return IliasPageElement.create_new(IliasElementType.FOLDER, link, "show all meetings")
+        return IliasPageElement(IliasElementType.FOLDER, link, "show all meetings")
 
     def _is_content_tab_selected(self) -> bool:
         return self._select_content_page_url() is None
 
-    def _is_info_tab(self) -> bool:
-        might_be_info = self._soup.find("form", attrs={"name": lambda x: x == "formInfoScreen"}) is not None
-        return self._page_type == IliasElementType.INFO_TAB and might_be_info
-
-    def _is_course_overview_page(self) -> bool:
-        return "baseClass=ilmembershipoverviewgui" in self._page_url
-
     def _select_content_page_url(self) -> Optional[IliasPageElement]:
         tab = self._soup.find(
             id="tab_view_content",
@@ -368,7 +289,7 @@ class IliasPage:
             link = tab.find("a")
             if link:
                 link = self._abs_url_from_link(link)
-                return IliasPageElement.create_new(IliasElementType.FOLDER, link, "select content page")
+                return IliasPageElement(IliasElementType.FOLDER, link, "select content page")
 
         _unexpected_html_warning()
         log.warn_contd(f"Could not find content tab URL on {self._page_url!r}.")
@@ -398,16 +319,14 @@ class IliasPage:
         # and just fetch the lone video url!
         if len(streams) == 1:
             video_url = streams[0]["sources"]["mp4"][0]["src"]
-            return [
-                IliasPageElement.create_new(IliasElementType.OPENCAST_VIDEO, video_url, self._source_name)
-            ]
+            return [IliasPageElement(IliasElementType.VIDEO, video_url, self._source_name)]
 
         log.explain(f"Found multiple videos for stream at {self._source_name}")
         items = []
         for stream in sorted(streams, key=lambda stream: stream["content"]):
             full_name = f"{self._source_name.replace('.mp4', '')} ({stream['content']}).mp4"
             video_url = stream["sources"]["mp4"][0]["src"]
-            items.append(IliasPageElement.create_new(IliasElementType.OPENCAST_VIDEO, video_url, full_name))
+            items.append(IliasPageElement(IliasElementType.VIDEO, video_url, full_name))
 
         return items
@@ -422,7 +341,7 @@ class IliasPage:
         link = self._abs_url_from_link(correct_link)
-        return IliasPageElement.create_new(IliasElementType.FORUM, link, "show all forum threads")
+        return IliasPageElement(IliasElementType.FORUM, link, "show all forum threads")
 
     def _find_personal_desktop_entries(self) -> List[IliasPageElement]:
         items: List[IliasPageElement] = []
@@ -433,10 +352,6 @@ class IliasPage:
             name = _sanitize_path_name(link.text.strip())
             url = self._abs_url_from_link(link)
 
-            if "cmd=manage" in url and "cmdClass=ilPDSelectedItemsBlockGUI" in url:
-                # Configure button/link does not have anything interesting
-                continue
-
             type = self._find_type_from_link(name, link, url)
             if not type:
                 _unexpected_html_warning()
@@ -449,7 +364,7 @@ class IliasPage:
                 url = re.sub(r"(target=file_\d+)", r"\1_download", url)
                 log.explain("Rewired file URL to include download part")
 
-            items.append(IliasPageElement.create_new(type, url, name))
+            items.append(IliasPageElement(type, url, name))
 
         return items
@@ -459,36 +374,18 @@ class IliasPage:
         for link in links:
             url = self._abs_url_from_link(link)
-            name = re.sub(r"\([\d,.]+ [MK]B\)", "", link.getText()).strip().replace("\t", "")
-            name = _sanitize_path_name(name)
+            name = _sanitize_path_name(link.getText().strip().replace("\t", ""))
 
             if "file_id" not in url:
                 _unexpected_html_warning()
                 log.warn_contd(f"Found unknown content page item {name!r} with url {url!r}")
                 continue
 
-            items.append(IliasPageElement.create_new(IliasElementType.FILE, url, name))
+            items.append(IliasPageElement(IliasElementType.FILE, url, name))
 
         return items
 
-    def _find_info_tab_entries(self) -> List[IliasPageElement]:
-        items = []
-        links: List[Tag] = self._soup.select("a.il_ContainerItemCommand")
-
-        for link in links:
-            if "cmdClass=ilobjcoursegui" not in link["href"]:
-                continue
-            if "cmd=sendfile" not in link["href"]:
-                continue
-            items.append(IliasPageElement.create_new(
-                IliasElementType.FILE,
-                self._abs_url_from_link(link),
-                _sanitize_path_name(link.getText())
-            ))
-
-        return items
-
-    def _find_opencast_video_entries(self) -> List[IliasPageElement]:
+    def _find_video_entries(self) -> List[IliasPageElement]:
         # ILIAS has three stages for video pages
         # 1. The initial dummy page without any videos. This page contains the link to the listing
         # 2. The video listing which might be paginated
@@ -508,29 +405,27 @@ class IliasPage:
             query_params = {"limit": "800", "cmd": "asyncGetTableGUI", "cmdMode": "asynch"}
             url = url_set_query_params(url, query_params)
             log.explain("Found ILIAS video frame page, fetching actual content next")
-            return [
-                IliasPageElement.create_new(IliasElementType.OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED, url, "")
-            ]
+            return [IliasPageElement(IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED, url, "")]
 
         is_paginated = self._soup.find(id=re.compile(r"tab_page_sel.+")) is not None
 
-        if is_paginated and not self._page_type == IliasElementType.OPENCAST_VIDEO_FOLDER:
+        if is_paginated and not self._page_type == IliasElementType.VIDEO_FOLDER:
             # We are in stage 2 - try to break pagination
-            return self._find_opencast_video_entries_paginated()
+            return self._find_video_entries_paginated()
 
-        return self._find_opencast_video_entries_no_paging()
+        return self._find_video_entries_no_paging()
 
-    def _find_opencast_video_entries_paginated(self) -> List[IliasPageElement]:
+    def _find_video_entries_paginated(self) -> List[IliasPageElement]:
         table_element: Tag = self._soup.find(name="table", id=re.compile(r"tbl_xoct_.+"))
 
         if table_element is None:
             log.warn("Couldn't increase elements per page (table not found). I might miss elements.")
-            return self._find_opencast_video_entries_no_paging()
+            return self._find_video_entries_no_paging()
 
         id_match = re.match(r"tbl_xoct_(.+)", table_element.attrs["id"])
         if id_match is None:
             log.warn("Couldn't increase elements per page (table id not found). I might miss elements.")
-            return self._find_opencast_video_entries_no_paging()
+            return self._find_video_entries_no_paging()
 
         table_id = id_match.group(1)
@@ -539,9 +434,9 @@ class IliasPage:
         url = url_set_query_params(self._page_url, query_params)
         log.explain("Disabled pagination, retrying folder as a new entry")
-        return [IliasPageElement.create_new(IliasElementType.OPENCAST_VIDEO_FOLDER, url, "")]
+        return [IliasPageElement(IliasElementType.VIDEO_FOLDER, url, "")]
 
-    def _find_opencast_video_entries_no_paging(self) -> List[IliasPageElement]:
+    def _find_video_entries_no_paging(self) -> List[IliasPageElement]:
         """
         Crawls the "second stage" video page. This page contains the actual video urls.
         """
@@ -553,11 +448,11 @@ class IliasPage:
         results: List[IliasPageElement] = []
 
         for link in video_links:
-            results.append(self._listed_opencast_video_to_element(link))
+            results.append(self._listed_video_to_element(link))
 
         return results
 
-    def _listed_opencast_video_to_element(self, link: Tag) -> IliasPageElement:
+    def _listed_video_to_element(self, link: Tag) -> IliasPageElement:
         # The link is part of a table with multiple columns, describing metadata.
         # 6th or 7th child (1 indexed) is the modification time string. Try to find it
         # by parsing backwards from the end and finding something that looks like a date
@@ -568,8 +463,8 @@ class IliasPage:
             modification_string = link.parent.parent.parent.select_one(
                 f"td.std:nth-child({index})"
             ).getText().strip()
-            if match := re.search(r"\d+\.\d+.\d+ \d+:\d+", modification_string):
-                modification_time = datetime.strptime(match.group(0), "%d.%m.%Y %H:%M")
+            if re.search(r"\d+\.\d+.\d+ - \d+:\d+", modification_string):
+                modification_time = datetime.strptime(modification_string, "%d.%m.%Y - %H:%M")
                 break
 
         if modification_time is None:
@@ -584,9 +479,7 @@ class IliasPage:
         video_url = self._abs_url_from_link(link)
 
         log.explain(f"Found video {video_name!r} at {video_url}")
-        return IliasPageElement.create_new(
-            IliasElementType.OPENCAST_VIDEO_PLAYER, video_url, video_name, modification_time
-        )
+        return IliasPageElement(IliasElementType.VIDEO_PLAYER, video_url, video_name, modification_time)
 
     def _find_exercise_entries(self) -> List[IliasPageElement]:
         if self._soup.find(id="tab_submission"):
@@ -620,7 +513,7 @@ class IliasPage:
             if date is None:
                 log.warn(f"Date parsing failed for exercise entry {name!r}")
 
-            results.append(IliasPageElement.create_new(
+            results.append(IliasPageElement(
                 IliasElementType.FILE,
                 self._abs_url_from_link(link),
                 name,
@@ -653,22 +546,22 @@ class IliasPage:
             # Two divs, side by side. Left is the name, right is the link ==> get left
             # sibling
             file_name = file_link.parent.findPrevious(name="div").getText().strip()
+            file_name = _sanitize_path_name(file_name)
             url = self._abs_url_from_link(file_link)
 
             log.explain(f"Found exercise entry {file_name!r}")
-            results.append(IliasPageElement.create_new(
+            results.append(IliasPageElement(
                 IliasElementType.FILE,
                 url,
-                _sanitize_path_name(container_name) + "/" + _sanitize_path_name(file_name),
-                mtime=None,  # We do not have any timestamp
-                skip_sanitize=True
+                container_name + "/" + file_name,
+                None  # We do not have any timestamp
             ))
 
         # Find all links to file listings (e.g. "Submitted Files" for groups)
         file_listings: List[Tag] = container.findAll(
             name="a",
             # download links contain the given command class
-            attrs={"href": lambda x: x and "cmdclass=ilexsubmissionfilegui" in x.lower()}
+            attrs={"href": lambda x: x and "cmdClass=ilexsubmissionfilegui" in x}
         )
 
         # Add each listing as a new
@@ -679,15 +572,14 @@ class IliasPage:
             label_container: Tag = parent_container.find(
                 attrs={"class": lambda x: x and "control-label" in x}
             )
-            file_name = label_container.getText().strip()
+            file_name = _sanitize_path_name(label_container.getText().strip())
             url = self._abs_url_from_link(listing)
 
             log.explain(f"Found exercise detail {file_name!r} at {url}")
-            results.append(IliasPageElement.create_new(
+            results.append(IliasPageElement(
                 IliasElementType.EXERCISE_FILES,
                 url,
-                _sanitize_path_name(container_name) + "/" + _sanitize_path_name(file_name),
-                None,  # we do not have any timestamp
-                skip_sanitize=True
+                container_name + "/" + file_name,
+                None  # we do not have any timestamp
             ))
 
         return results
@@ -695,18 +587,12 @@ class IliasPage:
     def _find_normal_entries(self) -> List[IliasPageElement]:
         result: List[IliasPageElement] = []
 
-        links: List[Tag] = []
         # Fetch all links and throw them to the general interpreter
-        if self._is_course_overview_page():
-            log.explain("Page is a course overview page, adjusting link selector")
-            links.extend(self._soup.select(".il-item-title > a"))
-        else:
-            links.extend(self._soup.select("a.il_ContainerItemTitle"))
+        links: List[Tag] = self._soup.select("a.il_ContainerItemTitle")
 
         for link in links:
             abs_url = self._abs_url_from_link(link)
-            # Make sure parents are sanitized. We do not want accidental parents
-            parents = [_sanitize_path_name(x) for x in self._find_upwards_folder_hierarchy(link)]
+            parents = self._find_upwards_folder_hierarchy(link)
 
             if parents:
                 element_name = "/".join(parents) + "/" + _sanitize_path_name(link.getText())
@@ -724,62 +610,21 @@ class IliasPage:
             if not element_type:
                 continue
-            if element_type == IliasElementType.MEETING:
-                normalized = _sanitize_path_name(self._normalize_meeting_name(element_name))
-                log.explain(f"Normalized meeting name from {element_name!r} to {normalized!r}")
-                element_name = normalized
             elif element_type == IliasElementType.FILE:
                 result.append(self._file_to_element(element_name, abs_url, link))
                 continue
 
             log.explain(f"Found {element_name!r}")
-            result.append(IliasPageElement.create_new(
-                element_type,
-                abs_url,
-                element_name,
-                description=description,
-                skip_sanitize=True
-            ))
+            result.append(IliasPageElement(element_type, abs_url, element_name, description=description))
 
         result += self._find_cards()
-        result += self._find_mediacast_videos()
 
         return result
 
-    def _find_mediacast_videos(self) -> List[IliasPageElement]:
-        videos: List[IliasPageElement] = []
-
-        for elem in cast(List[Tag], self._soup.select(".ilPlayerPreviewOverlayOuter")):
-            element_name = _sanitize_path_name(
-                elem.select_one(".ilPlayerPreviewDescription").getText().strip()
-            )
-            if not element_name.endswith(".mp4"):
-                # just to make sure it has some kinda-alrightish ending
-                element_name = element_name + ".mp4"
-            video_element = elem.find(name="video")
-            if not video_element:
-                _unexpected_html_warning()
-                log.warn_contd(f"No <video> element found for mediacast video '{element_name}'")
-                continue
-            videos.append(IliasPageElement.create_new(
-                typ=IliasElementType.MEDIACAST_VIDEO,
-                url=self._abs_url_from_relative(video_element.get("src")),
-                name=element_name,
-                mtime=self._find_mediacast_video_mtime(elem.findParent(name="td"))
-            ))
-
-        return videos
-
-    def _find_mediacast_video_mtime(self, enclosing_td: Tag) -> Optional[datetime]:
-        description_td: Tag = enclosing_td.findPreviousSibling("td")
-        if not description_td:
-            return None
-
-        meta_tag: Tag = description_td.find_all("p")[-1]
-        if not meta_tag:
-            return None
-
-        updated_str = meta_tag.getText().strip().replace("\n", " ")
-        updated_str = re.sub(".+?: ", "", updated_str)
-        return demangle_date(updated_str)
-
     def _is_in_expanded_meeting(self, tag: Tag) -> bool:
         """
         Returns whether a file is part of an expanded meeting.
@@ -817,14 +662,11 @@ class IliasPage:
             # ILIAS has proper accordions and weird blocks that look like normal headings,
             # but some JS later transforms them into an accordion.
 
-            # This is for these weird JS-y blocks and custom item groups
+            # This is for these weird JS-y blocks
             if "ilContainerItemsContainer" in parent.get("class"):
-                data_store_url = parent.parent.get("data-store-url", "").lower()
-                is_custom_item_group = "baseclass=ilcontainerblockpropertiesstoragegui" in data_store_url \
-                    and "cont_block_id=" in data_store_url
                 # I am currently under the impression that *only* those JS blocks have an
                 # ilNoDisplay class.
-                if not is_custom_item_group and "ilNoDisplay" not in parent.get("class"):
+                if "ilNoDisplay" not in parent.get("class"):
                     continue
 
             prev: Tag = parent.findPreviousSibling("div")
             if "ilContainerBlockHeader" in prev.get("class"):
@@ -884,9 +726,7 @@ class IliasPage:
         full_path = name + "." + file_type
 
         log.explain(f"Found file {full_path!r}")
-        return IliasPageElement.create_new(
-            IliasElementType.FILE, url, full_path, modification_date, skip_sanitize=True
-        )
+        return IliasPageElement(IliasElementType.FILE, url, full_path, modification_date)
 
     def _find_cards(self) -> List[IliasPageElement]:
         result: List[IliasPageElement] = []
@@ -903,7 +743,7 @@ class IliasPage:
                 log.warn_contd(f"Could not extract type for {title}")
                 continue
 
-            result.append(IliasPageElement.create_new(type, url, name))
+            result.append(IliasPageElement(type, url, name))
 
         card_button_tiles: List[Tag] = self._soup.select(".card-title button")
@@ -932,7 +772,7 @@ class IliasPage:
                 log.warn_contd(f"Could not extract type for {button}")
                 continue
 
-            result.append(IliasPageElement.create_new(type, url, name, description=description))
+            result.append(IliasPageElement(type, url, name, description=description))
 
         return result
@@ -956,7 +796,7 @@ class IliasPage:
        icon: Tag = card_root.select_one(".il-card-repository-head .icon")

        if "opencast" in icon["class"] or "xoct" in icon["class"]:
-            return IliasElementType.OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED
+            return IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED
        if "exc" in icon["class"]:
            return IliasElementType.EXERCISE
        if "webr" in icon["class"]:
@@ -977,8 +817,6 @@ class IliasPage:
            return IliasElementType.SURVEY
        if "file" in icon["class"]:
            return IliasElementType.FILE
-        if "mcst" in icon["class"]:
-            return IliasElementType.MEDIACAST_VIDEO_FOLDER

        _unexpected_html_warning()
        log.warn_contd(f"Could not extract type from {icon} for card title {card_title}")
@@ -986,9 +824,9 @@ class IliasPage:
    @staticmethod
    def _find_type_from_link(
            element_name: str,
            link_element: Tag,
            url: str
    ) -> Optional[IliasElementType]:
        """
        Decides which sub crawler to use for a given top level element.
@@ -1020,25 +858,6 @@ class IliasPage:
        if "baseClass=ilLMPresentationGUI" in parsed_url.query:
            return IliasElementType.LEARNING_MODULE

-        if "baseClass=ilMediaCastHandlerGUI" in parsed_url.query:
-            return IliasElementType.MEDIACAST_VIDEO_FOLDER
-
-        if "baseClass=ilSAHSPresentationGUI" in parsed_url.query:
-            return IliasElementType.SCORM_LEARNING_MODULE
-
-        # other universities might have content type specified in URL path
-        if "_file_" in parsed_url.path:
-            return IliasElementType.FILE
-
-        if "_fold_" in parsed_url.path or "_copa_" in parsed_url.path:
-            return IliasElementType.FOLDER
-
-        if "_frm_" in parsed_url.path:
-            return IliasElementType.FORUM
-
-        if "_exc_" in parsed_url.path:
-            return IliasElementType.EXERCISE
-
        # Booking and Meeting can not be detected based on the link. They do have a ref_id though, so
        # try to guess it from the image.
@@ -1080,11 +899,7 @@ class IliasPage:
        if img_tag is None:
            img_tag = found_parent.select_one("img.icon")

-        is_session_expansion_button = found_parent.find(
-            "a",
-            attrs={"href": lambda x: x and ("crs_next_sess=" in x or "crs_prev_sess=" in x)}
-        )
-        if img_tag is None and is_session_expansion_button:
+        if img_tag is None and found_parent.find("a", attrs={"href": lambda x: x and "crs_next_sess=" in x}):
            log.explain("Found session expansion button, skipping it as it has no content")
            return None
@@ -1094,7 +909,7 @@ class IliasPage:
            return None

        if "opencast" in str(img_tag["alt"]).lower():
-            return IliasElementType.OPENCAST_VIDEO_FOLDER_MAYBE_PAGINATED
+            return IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED

        if str(img_tag["src"]).endswith("icon_exc.svg"):
            return IliasElementType.EXERCISE
@@ -1114,52 +929,34 @@ class IliasPage:
        if str(img_tag["src"]).endswith("icon_tst.svg"):
            return IliasElementType.TEST

-        if str(img_tag["src"]).endswith("icon_mcst.svg"):
-            return IliasElementType.MEDIACAST_VIDEO_FOLDER
-
-        if str(img_tag["src"]).endswith("icon_sahs.svg"):
-            return IliasElementType.SCORM_LEARNING_MODULE

        return IliasElementType.FOLDER

-    @staticmethod
-    def is_logged_in(soup: BeautifulSoup) -> bool:
-        # Normal ILIAS pages
-        mainbar: Optional[Tag] = soup.find(class_="il-maincontrols-metabar")
-        if mainbar is not None:
-            login_button = mainbar.find(attrs={"href": lambda x: x and "login.php" in x})
-            shib_login = soup.find(id="button_shib_login")
-            return not login_button and not shib_login
-
-        # Personal Desktop
-        if soup.find("a", attrs={"href": lambda x: x and "block_type=pditems" in x}):
-            return True
-
-        # Empty personal desktop has zero (0) markers. Match on the text...
-        if alert := soup.select_one(".alert-info"):
-            text = alert.getText().lower()
-            if "you have not yet selected any favourites" in text:
-                return True
-            if "sie haben aktuell noch keine favoriten ausgewählt" in text:
-                return True
-
-        # Video listing embeds do not have complete ILIAS html. Try to match them by
-        # their video listing table
-        video_table = soup.find(
-            recursive=True,
-            name="table",
-            attrs={"id": lambda x: x is not None and x.startswith("tbl_xoct")}
-        )
-        if video_table is not None:
-            return True
-
-        # The individual video player wrapper page has nothing of the above.
-        # Match it by its playerContainer.
-        if soup.select_one("#playerContainer") is not None:
-            return True
-
-        return False
-
-    def get_permalink(self) -> Optional[str]:
-        return IliasPage.get_soup_permalink(self._soup)
+    @staticmethod
+    def _normalize_meeting_name(meeting_name: str) -> str:
+        """
+        Normalizes meeting names, which have a relative time as their first part,
+        to their date in ISO format.
+        """
+
+        # This checks whether we can reach a `:` without passing a `-`
+        if re.search(r"^[^-]+: ", meeting_name):
+            # Meeting name only contains date: "05. Jan 2000:"
+            split_delimiter = ":"
+        else:
+            # Meeting name contains date and start/end times: "05. Jan 2000, 16:00 - 17:30:"
+            split_delimiter = ", "
+
+        # We have a meeting day without time
+        date_portion_str = meeting_name.split(split_delimiter)[0]
+        date_portion = demangle_date(date_portion_str)
+
+        # We failed to parse the date, bail out
+        if not date_portion:
+            return meeting_name
+
+        # Replace the first section with the absolute date
+        rest_of_name = split_delimiter.join(meeting_name.split(split_delimiter)[1:])
+        return datetime.strftime(date_portion, "%Y-%m-%d") + split_delimiter + rest_of_name
    def _abs_url_from_link(self, link_tag: Tag) -> str:
        """
@@ -1173,13 +970,6 @@ class IliasPage:
        """
        return urljoin(self._page_url, relative_url)

-    @staticmethod
-    def get_soup_permalink(soup: BeautifulSoup) -> Optional[str]:
-        perma_link_element: Tag = soup.select_one(".il-footer-permanent-url > a")
-        if not perma_link_element or not perma_link_element.get("href"):
-            return None
-        return perma_link_element.get("href")
-

def _unexpected_html_warning() -> None:
    log.warn("Encountered unexpected HTML structure, ignoring element.")
@@ -1203,7 +993,7 @@ def demangle_date(date_str: str, fail_silently: bool = False) -> Optional[datetime]:
    date_str = re.sub("Gestern|Yesterday", _format_date_english(_yesterday()), date_str, re.I)
    date_str = re.sub("Heute|Today", _format_date_english(date.today()), date_str, re.I)
    date_str = re.sub("Morgen|Tomorrow", _format_date_english(_tomorrow()), date_str, re.I)
    date_str = date_str.strip()
    for german, english in zip(german_months, english_months):
        date_str = date_str.replace(german, english)

File diff suppressed because it is too large.

PFERD/deduplicator.py

@@ -14,7 +14,7 @@ def name_variants(path: PurePath) -> Iterator[PurePath]:

class Deduplicator:
-    FORBIDDEN_CHARS = '<>:"/\\|?*' + "".join([chr(i) for i in range(0, 32)])
+    FORBIDDEN_CHARS = '<>:"/\\|?*'
    FORBIDDEN_NAMES = {
        "CON", "PRN", "AUX", "NUL",
        "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",

PFERD/pferd.py

@@ -1,5 +1,5 @@
from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Set

from rich.markup import escape
@@ -43,16 +43,24 @@ class Pferd:
        crawl_sections = [name for name, _ in config.crawl_sections()]

-        crawlers_to_run = []  # With crawl: prefix
+        crawlers_to_run = set()  # With crawl: prefix
        unknown_names = []  # Without crawl: prefix

        for name in cli_crawlers:
            section_name = f"crawl:{name}"
            if section_name in crawl_sections:
                log.explain(f"Crawler section named {section_name!r} exists")
-                crawlers_to_run.append(section_name)
-            else:
-                log.explain(f"There's no crawler section named {section_name!r}")
+                crawlers_to_run.add(section_name)
+            # interpret name as alias of a crawler
+            alias_names = self._find_crawlers_by_alias(name, config)
+            if alias_names:
+                crawlers_to_run.update(alias_names)
+                log.explain_topic(f"Crawler alias {name!r} found corresponding crawler sections:")
+                for alias_name in alias_names:
+                    log.explain(f"Crawler section named {alias_name!r} with alias {name!r} exists")
+            if not section_name in crawl_sections and not alias_names:
+                log.explain(f"There's neither a crawler section named {section_name!r} nor does a crawler with alias {name!r} exist.")
                unknown_names.append(name)

        if unknown_names:
@@ -65,6 +73,14 @@ class Pferd:
        return crawlers_to_run

+    def _find_crawlers_by_alias(self, alias: str, config: Config) -> Set[str]:
+        alias_names = set()
+        for (section_name, section) in config.crawl_sections():
+            section_aliases = section.get("aliases", [])
+            if alias in section_aliases:
+                alias_names.add(section_name)
+        return alias_names
+
    def _find_crawlers_to_run(
        self,
        config: Config,

PFERD/transformer.py

@@ -110,10 +110,6 @@ class ExactReTf(Transformation):
            except ValueError:
                pass

-        named_groups: Dict[str, str] = match.groupdict()
-        for name, capture in named_groups.items():
-            locals_dir[name] = capture
-
        result = eval(f"f{right!r}", {}, locals_dir)
        return Transformed(PurePath(result))

PFERD/version.py

@@ -1,2 +1,2 @@
NAME = "PFERD"
-VERSION = "3.6.0"
+VERSION = "3.4.3"

README.md

@@ -56,17 +56,6 @@ Also, you can download most ILIAS pages directly like this:
$ pferd kit-ilias-web <url> <output_directory>
```

-PFERD supports other ILIAS instances as well, using the `ilias-web` crawler (see
-the [config section on `ilias-web`](CONFIG.md#the-ilias-web-crawler) for more
-detail on the `base-url` and `client-id` parameters):
-
-```
-$ pferd ilias-web \
-    --base-url https://ilias.my-university.example \
-    --client-id My_University desktop \
-    <output_directory>
-```
-
However, the CLI only lets you download a single thing at a time, and the
resulting command can grow long quite quickly. Because of this, PFERD can also
be used with a config file.

flake.lock (generated file, 27 lines deleted)

@@ -1,27 +0,0 @@
-{
-  "nodes": {
-    "nixpkgs": {
-      "locked": {
-        "lastModified": 1708979614,
-        "narHash": "sha256-FWLWmYojIg6TeqxSnHkKpHu5SGnFP5um1uUjH+wRV6g=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "b7ee09cf5614b02d289cd86fcfa6f24d4e078c2a",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixos-23.11",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "root": {
-      "inputs": {
-        "nixpkgs": "nixpkgs"
-      }
-    }
-  },
-  "root": "root",
-  "version": 7
-}

flake.nix

@@ -1,41 +0,0 @@
-{
-  description = "Tool for downloading course-related files from ILIAS";
-
-  inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
-  };
-
-  outputs = { self, nixpkgs }:
-    let
-      # Helper function to generate an attrset '{ x86_64-linux = f "x86_64-linux"; ... }'.
-      forAllSystems = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
-    in
-    {
-      packages = forAllSystems (system:
-        let pkgs = import nixpkgs { inherit system; };
-        in
-        rec {
-          default = pkgs.python3Packages.buildPythonApplication rec {
-            pname = "pferd";
-            # Performing black magic
-            # Don't worry, I sacrificed enough goats for the next few years
-            version = (pkgs.lib.importTOML ./PFERD/version.py).VERSION;
-            format = "pyproject";
-
-            src = ./.;
-
-            nativeBuildInputs = with pkgs.python3Packages; [
-              setuptools
-            ];
-
-            propagatedBuildInputs = with pkgs.python3Packages; [
-              aiohttp
-              beautifulsoup4
-              rich
-              keyring
-              certifi
-            ];
-          };
-        });
-    };
-}

mypy.ini (new file, 11 lines)

@@ -0,0 +1,11 @@
+[mypy]
+disallow_any_generics = True
+disallow_untyped_defs = True
+disallow_incomplete_defs = True
+no_implicit_optional = True
+warn_unused_ignores = True
+warn_unreachable = True
+show_error_context = True
+
+[mypy-rich.*,bs4,keyring]
+ignore_missing_imports = True

pyproject.toml

@@ -1,42 +1,3 @@
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
-
-[project]
-name = "PFERD"
-dependencies = [
-    "aiohttp>=3.8.1",
-    "beautifulsoup4>=4.10.0",
-    "rich>=11.0.0",
-    "keyring>=23.5.0",
-    "certifi>=2021.10.8"
-]
-dynamic = ["version"]
-requires-python = ">=3.9"
-
-[project.scripts]
-pferd = "PFERD.__main__:main"
-
-[tool.setuptools.dynamic]
-version = {attr = "PFERD.version.VERSION"}
-
-[tool.flake8]
-max-line-length = 110
-
-[tool.isort]
-line_length = 110
-
-[tool.autopep8]
-max_line_length = 110
-in-place = true
-recursive = true
-
-[tool.mypy]
-disallow_any_generics = true
-disallow_untyped_defs = true
-disallow_incomplete_defs = true
-no_implicit_optional = true
-warn_unused_ignores = true
-warn_unreachable = true
-show_error_context = true
-ignore_missing_imports = true

scripts/bump-version

@@ -1,8 +1,8 @@
#!/usr/bin/env python3

import argparse
-import re
import time
+import re

from subprocess import run

scripts/check

@@ -2,5 +2,5 @@
set -e

-mypy .
+mypy PFERD
flake8 PFERD

scripts/format

@@ -2,5 +2,5 @@
set -e

-autopep8 .
-isort .
+autopep8 --recursive --in-place PFERD
+isort PFERD

scripts/setup

@@ -13,5 +13,5 @@ pip install --upgrade setuptools
pip install --editable .

# Installing tools and type hints
-pip install --upgrade mypy flake8 flake8-pyproject autopep8 isort pyinstaller
+pip install --upgrade mypy flake8 autopep8 isort pyinstaller
pip install --upgrade types-chardet types-certifi

setup.cfg (new file, 23 lines)

@@ -0,0 +1,23 @@
+[metadata]
+name = PFERD
+version = attr: PFERD.version.VERSION
+
+[options]
+packages = find:
+python_requires = >=3.9
+install_requires =
+    aiohttp>=3.8.1
+    beautifulsoup4>=4.10.0
+    rich>=11.0.0
+    keyring>=23.5.0
+    certifi>=2021.10.8
+
+[options.entry_points]
+console_scripts =
+    pferd = PFERD.__main__:main
+
+[flake8]
+max_line_length = 110
+
+[isort]
+line_length = 110