Mirror of https://github.com/Garmelon/PFERD.git, synced 2023-12-21 10:23:01 +01:00

Compare commits: 26 commits (update-che... → sequential)

Commits (SHA1): bf27f4a686, 5adfdfbd2b, 5c3942a13d, 5c9209b12e, 50c7778d38,
354a22d1e3, 6f87c5c774, 1ca10571f0, 10e1a5e871, a2ffce4702, 0294ceb7d5,
6f30c6583d, 467fc526e8, 722d2eb393, 6d44aac278, 55a2de6b88, c0d6d8b229,
635caa765d, e69b55b349, 07200bbde5, c020cccc64, 259cfc20cc, 37b51a66d8,
f47d2f11d8, 1b6be6bd79, e1430e6298
.github/workflows/build-and-release.yml (vendored, 8 changed lines)

@@ -17,9 +17,9 @@ jobs:
 python: ["3.9"]
 steps:

-- uses: actions/checkout@v2
+- uses: actions/checkout@v3

-- uses: actions/setup-python@v2
+- uses: actions/setup-python@v4
 with:
 python-version: ${{ matrix.python }}

@@ -45,7 +45,7 @@ jobs:
 run: mv dist/pferd* dist/pferd-${{ matrix.os }}

 - name: Upload binary
-uses: actions/upload-artifact@v2
+uses: actions/upload-artifact@v3
 with:
 name: Binaries
 path: dist/pferd-${{ matrix.os }}

@@ -57,7 +57,7 @@ jobs:
 steps:

 - name: Download binaries
-uses: actions/download-artifact@v2
+uses: actions/download-artifact@v3
 with:
 name: Binaries

.gitignore (vendored, 1 changed line)

@@ -2,7 +2,6 @@
 /.venv/
 /PFERD.egg-info/
 __pycache__/
-/.vscode/

 # pyinstaller
 /pferd.spec
.vscode/settings.json (vendored, new file, 8 changed lines)

@@ -0,0 +1,8 @@
+{
+    "files.insertFinalNewline": true,
+    "files.trimFinalNewlines": true,
+    "python.formatting.provider": "autopep8",
+    "python.linting.enabled": true,
+    "python.linting.flake8Enabled": true,
+    "python.linting.mypyEnabled": true,
+}
CHANGELOG.md (28 changed lines)

@@ -23,8 +23,34 @@ ambiguous situations.
 ## Unreleased

 ### Fixed
-- Forum crawling crashing when parsing empty (= 0 messages) threads
+- Crawling of courses with the timeline view as the default tab
+- Crawling of file and custom opencast cards
+- Crawling of button cards without descriptions
+
+## 3.4.3 - 2022-11-29
+
+### Added
+- Missing documentation for `forums` option
+
+### Changed
+- Clear up error message shown when multiple paths are found to an element
+
+### Fixed
+- IPD crawler unnecessarily appending trailing slashes
+- Crawling opencast when ILIAS is set to English
+
+## 3.4.2 - 2022-10-26
+
+### Added
+- Recognize and crawl content pages in cards
+- Recognize and ignore surveys
+
+### Fixed
+- Forum crawling crashing when a thread has no messages at all
 - Forum crawling crashing when a forum has no threads at all
+- Ilias login failing in some cases
+- Crawling of paginated future meetings
+- IPD crawler handling of URLs without trailing slash
+
 ## 3.4.1 - 2022-08-17

@@ -181,6 +181,7 @@ script once per day should be fine.
 redirect to the actual URL. Set to a negative value to disable the automatic
 redirect. (Default: `-1`)
 - `videos`: Whether to download videos. (Default: `no`)
+- `forums`: Whether to download forum threads. (Default: `no`)
 - `http_timeout`: The timeout (in seconds) for all HTTP requests. (Default:
 `20.0`)

@@ -289,7 +290,7 @@ path matches `SOURCE`, it is renamed to `TARGET`.
 Example: `foo/bar --> baz`
 - Doesn't match `foo`, `a/foo/bar` or `foo/baz`
 - Converts `foo/bar` into `baz`
-- Converts `foo/bar/wargl` into `bar/wargl`
+- Converts `foo/bar/wargl` into `baz/wargl`

 Example: `foo/bar --> !`
 - Doesn't match `foo`, `a/foo/bar` or `foo/baz`

@@ -5,8 +5,6 @@ import os
 import sys
 from pathlib import Path

-from PFERD.update import check_for_updates
-
 from .auth import AuthLoadError
 from .cli import PARSER, ParserLoadError, load_default_section
 from .config import Config, ConfigDumpError, ConfigLoadError, ConfigOptionError

@@ -136,11 +134,6 @@ def main() -> None:
             loop.run_until_complete(asyncio.sleep(1))
             loop.close()
         else:
-            log.explain_topic("Checking for updates")
-            if not args.skip_update_check:
-                asyncio.run(check_for_updates())
-            else:
-                log.explain("Update check skipped due to configuration option")
             asyncio.run(pferd.run(args.debug_transforms))
     except (ConfigOptionError, AuthLoadError) as e:
         log.unlock()

|
@ -151,11 +151,6 @@ PARSER.add_argument(
|
|||||||
action="version",
|
action="version",
|
||||||
version=f"{NAME} {VERSION} (https://github.com/Garmelon/PFERD)",
|
version=f"{NAME} {VERSION} (https://github.com/Garmelon/PFERD)",
|
||||||
)
|
)
|
||||||
PARSER.add_argument(
|
|
||||||
"--skip-update-check",
|
|
||||||
action="store_true",
|
|
||||||
help="disable automatic update checks at startup"
|
|
||||||
)
|
|
||||||
PARSER.add_argument(
|
PARSER.add_argument(
|
||||||
"--config", "-c",
|
"--config", "-c",
|
||||||
type=Path,
|
type=Path,
|
||||||
|
@@ -9,7 +9,6 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Ty
 from ..auth import Authenticator
 from ..config import Config, Section
 from ..deduplicator import Deduplicator
-from ..limiter import Limiter
 from ..logging import ProgressBar, log
 from ..output_dir import FileSink, FileSinkToken, OnConflict, OutputDirectory, OutputDirError, Redownload
 from ..report import MarkConflictError, MarkDuplicateError, Report

@@ -98,10 +97,9 @@ def anoncritical(f: AWrapped) -> AWrapped:


 class CrawlToken(ReusableAsyncContextManager[ProgressBar]):
-    def __init__(self, limiter: Limiter, path: PurePath):
+    def __init__(self, path: PurePath):
         super().__init__()

-        self._limiter = limiter
         self._path = path

     @property

@@ -110,17 +108,15 @@ class CrawlToken(ReusableAsyncContextManager[ProgressBar]):

     async def _on_aenter(self) -> ProgressBar:
         self._stack.callback(lambda: log.status("[bold cyan]", "Crawled", fmt_path(self._path)))
-        await self._stack.enter_async_context(self._limiter.limit_crawl())
         bar = self._stack.enter_context(log.crawl_bar("[bold bright_cyan]", "Crawling", fmt_path(self._path)))

         return bar


 class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
-    def __init__(self, limiter: Limiter, fs_token: FileSinkToken, path: PurePath):
+    def __init__(self, fs_token: FileSinkToken, path: PurePath):
         super().__init__()

-        self._limiter = limiter
         self._fs_token = fs_token
         self._path = path

@@ -129,7 +125,6 @@ class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
         return self._path

     async def _on_aenter(self) -> Tuple[ProgressBar, FileSink]:
-        await self._stack.enter_async_context(self._limiter.limit_download())
         sink = await self._stack.enter_async_context(self._fs_token)
         # The "Downloaded ..." message is printed in the output dir, not here
         bar = self._stack.enter_context(log.download_bar("[bold bright_cyan]", "Downloading",

@@ -235,12 +230,6 @@ class Crawler(ABC):
         self.name = name
         self.error_free = True

-        self._limiter = Limiter(
-            task_limit=section.tasks(),
-            download_limit=section.downloads(),
-            task_delay=section.task_delay(),
-        )
-
         self._deduplicator = Deduplicator(section.windows_paths())
         self._transformer = Transformer(section.transform())

@@ -288,7 +277,7 @@ class Crawler(ABC):
             return None

         log.explain("Answer: Yes")
-        return CrawlToken(self._limiter, path)
+        return CrawlToken(path)

     async def download(
         self,

@@ -313,7 +302,7 @@ class Crawler(ABC):
             return None

         log.explain("Answer: Yes")
-        return DownloadToken(self._limiter, fs_token, path)
+        return DownloadToken(fs_token, path)

     async def _cleanup(self) -> None:
         log.explain_topic("Decision: Clean up files")

@@ -1,12 +1,9 @@
 import asyncio
-import http.cookies
-import ssl
+from http.cookiejar import LWPCookieJar
 from pathlib import Path, PurePath
-from typing import Any, Dict, List, Optional
+from typing import Dict, List, Optional

-import aiohttp
-import certifi
-from aiohttp.client import ClientTimeout
+import requests

 from ..auth import Authenticator
 from ..config import Config

@@ -35,9 +32,9 @@ class HttpCrawler(Crawler):

         self._authentication_id = 0
         self._authentication_lock = asyncio.Lock()
-        self._request_count = 0
-        self._http_timeout = section.http_timeout()
+        self._http_timeout = section.http_timeout()  # TODO Use or remove

+        self._cookie_jar = LWPCookieJar()
         self._cookie_jar_path = self._output_dir.resolve(self.COOKIE_FILE)
         self._shared_cookie_jar_paths: Optional[List[Path]] = None
         self._shared_auth = shared_auth

@@ -57,7 +54,6 @@ class HttpCrawler(Crawler):
         # This should reduce the amount of requests we make: If an authentication is in progress
         # all future requests wait for authentication to complete.
         async with self._authentication_lock:
-            self._request_count += 1
             return self._authentication_id

     async def authenticate(self, caller_auth_id: int) -> None:

@@ -106,32 +102,13 @@ class HttpCrawler(Crawler):

             self._shared_cookie_jar_paths.append(self._cookie_jar_path)

-    def _load_cookies_from_file(self, path: Path) -> None:
-        jar: Any = http.cookies.SimpleCookie()
-        with open(path, encoding="utf-8") as f:
-            for i, line in enumerate(f):
-                # Names of headers are case insensitive
-                if line[:11].lower() == "set-cookie:":
-                    jar.load(line[11:])
-                else:
-                    log.explain(f"Line {i} doesn't start with 'Set-Cookie:', ignoring it")
-        self._cookie_jar.update_cookies(jar)
-
-    def _save_cookies_to_file(self, path: Path) -> None:
-        jar: Any = http.cookies.SimpleCookie()
-        for morsel in self._cookie_jar:
-            jar[morsel.key] = morsel
-        with open(path, "w", encoding="utf-8") as f:
-            f.write(jar.output(sep="\n"))
-            f.write("\n")  # A trailing newline is just common courtesy
-
     def _load_cookies(self) -> None:
         log.explain_topic("Loading cookies")

         cookie_jar_path: Optional[Path] = None

         if self._shared_cookie_jar_paths is None:
-            log.explain("Not sharing any cookies")
+            log.explain("Not sharing cookies")
             cookie_jar_path = self._cookie_jar_path
         else:
             log.explain("Sharing cookies")

@@ -154,46 +131,38 @@ class HttpCrawler(Crawler):

         log.explain(f"Loading cookies from {fmt_real_path(cookie_jar_path)}")
         try:
-            self._load_cookies_from_file(cookie_jar_path)
+            self._cookie_jar.load(filename=str(cookie_jar_path))
         except Exception as e:
-            log.explain("Failed to load cookies")
-            log.explain(str(e))
+            log.explain(f"Failed to load cookies: {e}")
+            log.explain("Proceeding without cookies")

     def _save_cookies(self) -> None:
         log.explain_topic("Saving cookies")

         try:
             log.explain(f"Saving cookies to {fmt_real_path(self._cookie_jar_path)}")
-            self._save_cookies_to_file(self._cookie_jar_path)
+            self._cookie_jar.save(filename=str(self._cookie_jar_path))
         except Exception as e:
-            log.warn(f"Failed to save cookies to {fmt_real_path(self._cookie_jar_path)}")
-            log.warn(str(e))
+            log.warn(f"Failed to save cookies: {e}")

     async def run(self) -> None:
         self._request_count = 0
-        self._cookie_jar = aiohttp.CookieJar()
         self._load_cookies()

-        async with aiohttp.ClientSession(
-            headers={"User-Agent": f"{NAME}/{VERSION}"},
-            cookie_jar=self._cookie_jar,
-            connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())),
-            timeout=ClientTimeout(
-                # 30 minutes. No download in the history of downloads was longer than 30 minutes.
-                # This is enough to transfer a 600 MB file over a 3 Mib/s connection.
-                # Allowing an arbitrary value could be annoying for overnight batch jobs
-                total=15 * 60,
-                connect=self._http_timeout,
-                sock_connect=self._http_timeout,
-                sock_read=self._http_timeout,
-            )
-        ) as session:
-            self.session = session
+        self.session = requests.Session()
+        self.session.headers["User-Agent"] = f"{NAME}/{VERSION}"
+        # From the request docs: "All requests code should work out of the box
+        # with externally provided instances of CookieJar, e.g. LWPCookieJar and
+        # FileCookieJar."
+        # https://requests.readthedocs.io/en/latest/api/#requests.cookies.RequestsCookieJar
+        self.session.cookies = self._cookie_jar  # type: ignore
+        with self.session:
             try:
                 await super().run()
             finally:
                 del self.session
-                log.explain_topic(f"Total amount of HTTP requests: {self._request_count}")

         # They are saved in authenticate, but a final save won't hurt
         self._save_cookies()

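The hunks above drop the hand-rolled Set-Cookie parsing and the aiohttp session in favour of Python's standard LWPCookieJar and a requests.Session. A minimal, hedged sketch of that pattern follows; the file name and the ignore_discard flag are illustrative and not taken from the diff:

    from http.cookiejar import LWPCookieJar

    import requests

    jar = LWPCookieJar()
    try:
        # ignore_discard also keeps session cookies that carry no expiry date.
        jar.load(filename="cookies.txt", ignore_discard=True)
    except OSError:
        pass  # no cookie file yet, start with an empty jar

    with requests.Session() as session:
        session.headers["User-Agent"] = "PFERD/3.4.3"
        # Per the requests docs, externally provided CookieJar instances
        # (e.g. LWPCookieJar) work as a drop-in replacement.
        session.cookies = jar  # type: ignore[assignment]
        # ... perform requests here; new cookies accumulate in the jar ...

    jar.save(filename="cookies.txt", ignore_discard=True)
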
@@ -24,6 +24,7 @@ class IliasElementType(Enum):
     LINK = "link"
     BOOKING = "booking"
     MEETING = "meeting"
+    SURVEY = "survey"
     VIDEO = "video"
     VIDEO_PLAYER = "video_player"
     VIDEO_FOLDER = "video_folder"

@@ -133,7 +134,7 @@ class IliasPage:

         thread_ids = [f["value"] for f in form.find_all(attrs={"name": "thread_ids[]"})]

-        form_data: Dict[str, Union[str, List[ſtr]]] = {
+        form_data: Dict[str, Union[str, List[str]]] = {
             "thread_ids[]": thread_ids,
             "selected_cmd2": "html",
             "select_cmd2": "Ausführen",

@@ -157,6 +158,8 @@ class IliasPage:
         if self._contains_collapsed_future_meetings():
             log.explain("Requesting *all* future meetings")
             return self._uncollapse_future_meetings_url()
+        if not self._is_content_tab_selected():
+            return self._select_content_page_url()
         return None

     def _is_forum_page(self) -> bool:

@@ -219,6 +222,27 @@ class IliasPage:
         link = self._abs_url_from_link(element)
         return IliasPageElement(IliasElementType.FOLDER, link, "show all meetings")

+    def _is_content_tab_selected(self) -> bool:
+        return self._select_content_page_url() is None
+
+    def _select_content_page_url(self) -> Optional[IliasPageElement]:
+        tab = self._soup.find(
+            id="tab_view_content",
+            attrs={"class": lambda x: x is not None and "active" not in x}
+        )
+        # Already selected (or not found)
+        if not tab:
+            return None
+        link = tab.find("a")
+        if link:
+            link = self._abs_url_from_link(link)
+            return IliasPageElement(IliasElementType.FOLDER, link, "select content page")
+
+        _unexpected_html_warning()
+        log.warn_contd(f"Could not find content tab URL on {self._page_url!r}.")
+        log.warn_contd("PFERD might not find content on the course's main page.")
+        return None
+
     def _player_to_video(self) -> List[IliasPageElement]:
         # Fetch the actual video page. This is a small wrapper page initializing a javscript
         # player. Sadly we can not execute that JS. The actual video stream url is nowhere

@@ -365,7 +389,7 @@ class IliasPage:
         """
         # Video start links are marked with an "Abspielen" link
         video_links: List[Tag] = self._soup.findAll(
-            name="a", text=re.compile(r"\s*Abspielen\s*")
+            name="a", text=re.compile(r"\s*(Abspielen|Play)\s*")
         )

         results: List[IliasPageElement] = []

@@ -684,7 +708,11 @@ class IliasPage:
             "div",
             attrs={"class": lambda x: x and "caption" in x},
         )
-        description = caption_parent.find_next_sibling("div").getText().strip()
+        caption_container = caption_parent.find_next_sibling("div")
+        if caption_container:
+            description = caption_container.getText().strip()
+        else:
+            description = None

         if not type:
             _unexpected_html_warning()

@@ -714,7 +742,7 @@ class IliasPage:

         icon: Tag = card_root.select_one(".il-card-repository-head .icon")

-        if "opencast" in icon["class"]:
+        if "opencast" in icon["class"] or "xoct" in icon["class"]:
             return IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED
         if "exc" in icon["class"]:
             return IliasElementType.EXERCISE

@@ -730,6 +758,12 @@ class IliasPage:
             return IliasElementType.TEST
         if "fold" in icon["class"]:
             return IliasElementType.FOLDER
+        if "copa" in icon["class"]:
+            return IliasElementType.FOLDER
+        if "svy" in icon["class"]:
+            return IliasElementType.SURVEY
+        if "file" in icon["class"]:
+            return IliasElementType.FILE

         _unexpected_html_warning()
         log.warn_contd(f"Could not extract type from {icon} for card title {card_title}")

@@ -126,13 +126,6 @@ def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Calla
     return decorator


-def _wrap_io_in_warning(name: str) -> Callable[[AWrapped], AWrapped]:
-    """
-    Wraps any I/O exception in a CrawlWarning.
-    """
-    return _iorepeat(1, name)
-
-
 # Crawler control flow:
 #
 # crawl_desktop -+

@@ -194,7 +187,7 @@ instance's greatest bottleneck.
         self._links = section.links()
         self._videos = section.videos()
         self._forums = section.forums()
-        self._visited_urls: Set[str] = set()
+        self._visited_urls: Dict[str, PurePath] = dict()

     async def _run(self) -> None:
         if isinstance(self._target, int):

@@ -226,80 +219,22 @@ instance's greatest bottleneck.
             return
         cl = maybe_cl  # Not mypy's fault, but explained here: https://github.com/python/mypy/issues/2608

-        elements: List[IliasPageElement] = []
-        # A list as variable redefinitions are not propagated to outer scopes
-        description: List[BeautifulSoup] = []
-
-        @_iorepeat(3, "crawling url")
-        async def gather_elements() -> None:
-            elements.clear()
-            async with cl:
-                next_stage_url: Optional[str] = url
-                current_parent = None
-
-                # Duplicated code, but the root page is special - we want to avoid fetching it twice!
-                while next_stage_url:
-                    soup = await self._get_page(next_stage_url)
-
-                    if current_parent is None and expected_id is not None:
+        def ensure_is_valid_course_id(parent: Optional[IliasPageElement], soup: BeautifulSoup) -> None:
+            if parent is None and expected_id is not None:
                 perma_link_element: Tag = soup.find(id="current_perma_link")
                 if not perma_link_element or "crs_" not in perma_link_element.get("value"):
                     raise CrawlError("Invalid course id? Didn't find anything looking like a course")

-                    log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
-                    log.explain(f"URL: {next_stage_url}")
-                    page = IliasPage(soup, next_stage_url, current_parent)
-                    if next_element := page.get_next_stage_element():
-                        current_parent = next_element
-                        next_stage_url = next_element.url
-                    else:
-                        next_stage_url = None
-
-                    elements.extend(page.get_child_elements())
-                    if description_string := page.get_description():
-                        description.append(description_string)
-
-        # Fill up our task list with the found elements
-        await gather_elements()
-
-        if description:
-            await self._download_description(PurePath("."), description[0])
-
-        elements.sort(key=lambda e: e.id())
-
-        tasks: List[Awaitable[None]] = []
-        for element in elements:
-            if handle := await self._handle_ilias_element(PurePath("."), element):
-                tasks.append(asyncio.create_task(handle))
-
-        # And execute them
-        await self.gather(tasks)
-
-    async def _handle_ilias_page(
-        self,
-        url: str,
-        parent: IliasPageElement,
-        path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
-        maybe_cl = await self.crawl(path)
-        if not maybe_cl:
-            return None
-        return self._crawl_ilias_page(url, parent, maybe_cl)
+        await self._crawl_ilias_page(url, None, cl, ensure_is_valid_course_id)

     @anoncritical
     async def _crawl_ilias_page(
         self,
         url: str,
-        parent: IliasPageElement,
+        parent: Optional[IliasPageElement],
         cl: CrawlToken,
+        next_stage_hook: Callable[[Optional[IliasPageElement], BeautifulSoup], None] = lambda a, b: None
     ) -> None:
-        elements: List[IliasPageElement] = []
-        # A list as variable redefinitions are not propagated to outer scopes
-        description: List[BeautifulSoup] = []
-
-        @_iorepeat(3, "crawling folder")
-        async def gather_elements() -> None:
-            elements.clear()
         async with cl:
             next_stage_url: Optional[str] = url
             current_parent = parent

@@ -308,6 +243,9 @@ instance's greatest bottleneck.
                 soup = await self._get_page(next_stage_url)
                 log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
                 log.explain(f"URL: {next_stage_url}")
+
+                next_stage_hook(current_parent, soup)
+
                 page = IliasPage(soup, next_stage_url, current_parent)
                 if next_element := page.get_next_stage_element():
                     current_parent = next_element

@@ -315,25 +253,11 @@ instance's greatest bottleneck.
                 else:
                     next_stage_url = None

-                elements.extend(page.get_child_elements())
+                for element in sorted(page.get_child_elements(), key=lambda e: e.id()):
+                    await self._handle_ilias_element(cl.path, element)

                 if description_string := page.get_description():
-                    description.append(description_string)
-
-        # Fill up our task list with the found elements
-        await gather_elements()
-
-        if description:
-            await self._download_description(cl.path, description[0])
-
-        elements.sort(key=lambda e: e.id())
-
-        tasks: List[Awaitable[None]] = []
-        for element in elements:
-            if handle := await self._handle_ilias_element(cl.path, element):
-                tasks.append(asyncio.create_task(handle))
-
-        # And execute them
-        await self.gather(tasks)
+                    await self._download_description(cl.path, description_string)

     # These decorators only apply *to this method* and *NOT* to the returned
     # awaitables!

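This hunk is the heart of the "sequential" branch: instead of collecting one task per element and running them concurrently via gather, the crawler now awaits each element handler in turn. A rough, self-contained sketch of the difference (handle and the element list are placeholders, not PFERD code):

    import asyncio
    from typing import List

    async def handle(element: str) -> None:
        # stand-in for the real per-element handler
        print("handled", element)

    async def main(elements: List[str]) -> None:
        # Old approach (concurrent): schedule one task per element, then wait for all.
        tasks = [asyncio.create_task(handle(e)) for e in elements]
        await asyncio.gather(*tasks)

        # New approach (sequential): await each handler directly, in a stable order.
        for e in sorted(elements):
            await handle(e)

    asyncio.run(main(["b", "a", "c"]))
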
@@ -345,12 +269,14 @@ instance's greatest bottleneck.
         self,
         parent_path: PurePath,
         element: IliasPageElement,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         if element.url in self._visited_urls:
             raise CrawlWarning(
-                f"Found second path to element {element.name!r} at {element.url!r}. Aborting subpath"
+                f"Found second path to element {element.name!r} at {element.url!r}. "
+                + f"First path: {fmt_path(self._visited_urls[element.url])}. "
+                + f"Second path: {fmt_path(parent_path)}."
             )
-        self._visited_urls.add(element.url)
+        self._visited_urls[element.url] = parent_path

         element_path = PurePath(parent_path, element.name)

@@ -365,7 +291,7 @@ instance's greatest bottleneck.
             return None

         if element.type == IliasElementType.FILE:
-            return await self._handle_file(element, element_path)
+            await self._handle_file(element, element_path)
         elif element.type == IliasElementType.FORUM:
             if not self._forums:
                 log.status(

@@ -375,22 +301,36 @@ instance's greatest bottleneck.
                     "[bright_black](enable with option 'forums')"
                 )
                 return None
-            return await self._handle_forum(element, element_path)
+            await self._handle_forum(element, element_path)
         elif element.type == IliasElementType.TEST:
-            log.explain_topic(f"Decision: Crawl {fmt_path(element_path)}")
-            log.explain("Tests contain no relevant files")
-            log.explain("Answer: No")
+            log.status(
+                "[bold bright_black]",
+                "Ignored",
+                fmt_path(element_path),
+                "[bright_black](tests contain no relevant data)"
+            )
+            return None
+        elif element.type == IliasElementType.SURVEY:
+            log.status(
+                "[bold bright_black]",
+                "Ignored",
+                fmt_path(element_path),
+                "[bright_black](surveys contain no relevant data)"
+            )
             return None
         elif element.type == IliasElementType.LINK:
-            return await self._handle_link(element, element_path)
+            await self._handle_link(element, element_path)
         elif element.type == IliasElementType.BOOKING:
-            return await self._handle_booking(element, element_path)
+            await self._handle_booking(element, element_path)
         elif element.type == IliasElementType.VIDEO:
-            return await self._handle_file(element, element_path)
+            await self._handle_file(element, element_path)
         elif element.type == IliasElementType.VIDEO_PLAYER:
-            return await self._handle_video(element, element_path)
+            await self._handle_video(element, element_path)
         elif element.type in _DIRECTORY_PAGES:
-            return await self._handle_ilias_page(element.url, element, element_path)
+            maybe_cl = await self.crawl(element_path)
+            if not maybe_cl:
+                return None
+            await self._crawl_ilias_page(element.url, element, maybe_cl)
         else:
             # This will retry it a few times, failing everytime. It doesn't make any network
             # requests, so that's fine.

@@ -400,7 +340,7 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         log.explain_topic(f"Decision: Crawl Link {fmt_path(element_path)}")
         log.explain(f"Links type is {self._links}")

@@ -417,7 +357,7 @@ instance's greatest bottleneck.
         if not maybe_dl:
             return None

-        return self._download_link(element, link_template_maybe, maybe_dl)
+        await self._download_link(element, link_template_maybe, maybe_dl)

     @anoncritical
     @_iorepeat(3, "resolving link")

@@ -509,7 +449,7 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         # Copy old mapping as it is likely still relevant
         if self.prev_report:
             self.report.add_custom_value(

@@ -535,7 +475,7 @@ instance's greatest bottleneck.

             return None

-        return self._download_video(element_path, element, maybe_dl)
+        await self._download_video(element_path, element, maybe_dl)

     def _previous_contained_videos(self, video_path: PurePath) -> List[PurePath]:
         if not self.prev_report:

@@ -617,11 +557,11 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         maybe_dl = await self.download(element_path, mtime=element.mtime)
         if not maybe_dl:
             return None
-        return self._download_file(element, maybe_dl)
+        await self._download_file(element, maybe_dl)

     @anoncritical
     @_iorepeat(3, "downloading file")

@@ -664,11 +604,11 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         maybe_cl = await self.crawl(element_path)
         if not maybe_cl:
             return None
-        return self._crawl_forum(element, maybe_cl)
+        await self._crawl_forum(element, maybe_cl)

     @_iorepeat(3, "crawling forum")
     @anoncritical

@@ -2,7 +2,7 @@ import os
 import re
 from dataclasses import dataclass
 from pathlib import PurePath
-from typing import Awaitable, List, Optional, Pattern, Set, Union
+from typing import List, Optional, Pattern, Set, Tuple, Union
 from urllib.parse import urljoin

 from bs4 import BeautifulSoup, Tag

@@ -64,67 +64,62 @@ class KitIpdCrawler(HttpCrawler):
         self._file_regex = section.link_regex()

     async def _run(self) -> None:
-        maybe_cl = await self.crawl(PurePath("."))
-        if not maybe_cl:
+        cl = await self.crawl(PurePath("."))
+        if not cl:
             return

-        tasks: List[Awaitable[None]] = []
-
-        async with maybe_cl:
+        async with cl:
             for item in await self._fetch_items():
                 if isinstance(item, KitIpdFolder):
-                    tasks.append(self._crawl_folder(item))
+                    await self._crawl_folder(item)
                 else:
                     # Orphan files are placed in the root folder
-                    tasks.append(self._download_file(PurePath("."), item))
-
-        await self.gather(tasks)
+                    await self._download_file(PurePath("."), item)

     async def _crawl_folder(self, folder: KitIpdFolder) -> None:
         path = PurePath(folder.name)
         if not await self.crawl(path):
             return

-        tasks = [self._download_file(path, file) for file in folder.files]
-
-        await self.gather(tasks)
+        for file in folder.files:
+            await self._download_file(path, file)

     async def _download_file(self, parent: PurePath, file: KitIpdFile) -> None:
         element_path = parent / file.name
-        maybe_dl = await self.download(element_path)
-        if not maybe_dl:
+        dl = await self.download(element_path)
+        if not dl:
             return

-        async with maybe_dl as (bar, sink):
+        async with dl as (bar, sink):
             await self._stream_from_url(file.url, sink, bar)

     async def _fetch_items(self) -> Set[Union[KitIpdFile, KitIpdFolder]]:
-        page = await self.get_page()
+        page, url = await self._get_page()
         elements: List[Tag] = self._find_file_links(page)
         items: Set[Union[KitIpdFile, KitIpdFolder]] = set()

         for element in elements:
             folder_label = self._find_folder_label(element)
             if folder_label:
-                folder = self._extract_folder(folder_label)
+                folder = self._extract_folder(folder_label, url)
                 if folder not in items:
                     items.add(folder)
                     folder.explain()
             else:
-                file = self._extract_file(element)
+                file = self._extract_file(element, url)
                 items.add(file)
                 log.explain_topic(f"Orphan file {file.name!r} (href={file.url!r})")
                 log.explain("Attributing it to root folder")

         return items

-    def _extract_folder(self, folder_tag: Tag) -> KitIpdFolder:
+    def _extract_folder(self, folder_tag: Tag, url: str) -> KitIpdFolder:
         files: List[KitIpdFile] = []
         name = folder_tag.getText().strip()

         container: Tag = folder_tag.findNextSibling(name="table")
         for link in self._find_file_links(container):
-            files.append(self._extract_file(link))
+            files.append(self._extract_file(link, url))

         return KitIpdFolder(name, files)

@@ -135,16 +130,16 @@ class KitIpdCrawler(HttpCrawler):
             return None
         return enclosing_table.findPreviousSibling(name=re.compile("^h[1-6]$"))

-    def _extract_file(self, link: Tag) -> KitIpdFile:
-        url = self._abs_url_from_link(link)
+    def _extract_file(self, link: Tag, url: str) -> KitIpdFile:
+        url = self._abs_url_from_link(url, link)
         name = os.path.basename(url)
         return KitIpdFile(name, url)

     def _find_file_links(self, tag: Union[Tag, BeautifulSoup]) -> List[Tag]:
         return tag.findAll(name="a", attrs={"href": self._file_regex})

-    def _abs_url_from_link(self, link_tag: Tag) -> str:
-        return urljoin(self._url, link_tag.get("href"))
+    def _abs_url_from_link(self, url: str, link_tag: Tag) -> str:
+        return urljoin(url, link_tag.get("href"))

     async def _stream_from_url(self, url: str, sink: FileSink, bar: ProgressBar) -> None:
         async with self.session.get(url, allow_redirects=False) as resp:

@@ -159,12 +154,12 @@ class KitIpdCrawler(HttpCrawler):

             sink.done()

-    async def get_page(self) -> BeautifulSoup:
-        async with self.session.get(self._url) as request:
+    async def _get_page(self) -> Tuple[BeautifulSoup, str]:
+        response = self.session.get(self._url)
+
         # The web page for Algorithmen für Routenplanung contains some
         # weird comments that beautifulsoup doesn't parse correctly. This
         # hack enables those pages to be crawled, and should hopefully not
         # cause issues on other pages.
-            content = (await request.read()).decode("utf-8")
-            content = re.sub(r"<!--.*?-->", "", content)
-            return soupify(content.encode("utf-8"))
+        content = re.sub(r"<!--.*?-->", "", response.text)
+        return soupify(content.encode("utf-8")), str(request.url)

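The rewritten page fetcher above keeps the comment-stripping workaround for pages whose HTML comments confuse BeautifulSoup. A small self-contained sketch of that idea (the URL is illustrative, and plain BeautifulSoup stands in for the project's soupify helper):

    import re

    import requests
    from bs4 import BeautifulSoup

    response = requests.get("https://example.org/some-page.html")  # illustrative URL
    # Strip HTML comments before parsing; some pages contain malformed comments
    # that would otherwise trip up the parser.
    content = re.sub(r"<!--.*?-->", "", response.text)
    soup = BeautifulSoup(content, "html.parser")
    print(soup.title)
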
@@ -71,8 +71,6 @@ class LocalCrawler(Crawler):
         if not cl:
             return

-        tasks = []
-
         async with cl:
             await asyncio.sleep(random.uniform(
                 0.5 * self._crawl_delay,

@@ -81,9 +79,7 @@ class LocalCrawler(Crawler):

             for child in path.iterdir():
                 pure_child = cl.path / child.name
-                tasks.append(self._crawl_path(child, pure_child))
+                await self._crawl_path(child, pure_child)

-        await self.gather(tasks)
-
     async def _crawl_file(self, path: Path, pure: PurePath) -> None:
         stat = path.stat()

@@ -1,97 +0,0 @@
-import asyncio
-import time
-from contextlib import asynccontextmanager
-from dataclasses import dataclass
-from typing import AsyncIterator, Optional
-
-
-@dataclass
-class Slot:
-    active: bool = False
-    last_left: Optional[float] = None
-
-
-class Limiter:
-    def __init__(
-        self,
-        task_limit: int,
-        download_limit: int,
-        task_delay: float
-    ):
-        if task_limit <= 0:
-            raise ValueError("task limit must be at least 1")
-        if download_limit <= 0:
-            raise ValueError("download limit must be at least 1")
-        if download_limit > task_limit:
-            raise ValueError("download limit can't be greater than task limit")
-        if task_delay < 0:
-            raise ValueError("Task delay must not be negative")
-
-        self._slots = [Slot() for _ in range(task_limit)]
-        self._downloads = download_limit
-        self._delay = task_delay
-
-        self._condition = asyncio.Condition()
-
-    def _acquire_slot(self) -> Optional[Slot]:
-        for slot in self._slots:
-            if not slot.active:
-                slot.active = True
-                return slot
-
-        return None
-
-    async def _wait_for_slot_delay(self, slot: Slot) -> None:
-        if slot.last_left is not None:
-            delay = slot.last_left + self._delay - time.time()
-            if delay > 0:
-                await asyncio.sleep(delay)
-
-    def _release_slot(self, slot: Slot) -> None:
-        slot.last_left = time.time()
-        slot.active = False
-
-    @asynccontextmanager
-    async def limit_crawl(self) -> AsyncIterator[None]:
-        slot: Slot
-        async with self._condition:
-            while True:
-                if found_slot := self._acquire_slot():
-                    slot = found_slot
-                    break
-                await self._condition.wait()
-
-        await self._wait_for_slot_delay(slot)
-
-        try:
-            yield
-        finally:
-            async with self._condition:
-                self._release_slot(slot)
-                self._condition.notify_all()
-
-    @asynccontextmanager
-    async def limit_download(self) -> AsyncIterator[None]:
-        slot: Slot
-        async with self._condition:
-            while True:
-                if self._downloads <= 0:
-                    await self._condition.wait()
-                    continue
-
-                if found_slot := self._acquire_slot():
-                    slot = found_slot
-                    self._downloads -= 1
-                    break
-
-                await self._condition.wait()
-
-        await self._wait_for_slot_delay(slot)
-
-        try:
-            yield
-        finally:
-            async with self._condition:
-                self._release_slot(slot)
-                self._downloads += 1
-                self._condition.notify_all()

@@ -1,53 +0,0 @@
-from dataclasses import dataclass
-import ssl
-from typing import Optional
-import aiohttp
-import certifi
-
-from .version import NAME, VERSION
-from .logging import log
-
-
-@dataclass
-class PferdUpdate:
-    release_url: str
-    version: str
-
-
-def _build_session() -> aiohttp.ClientSession:
-    return aiohttp.ClientSession(
-        headers={"User-Agent": f"{NAME}/{VERSION}"},
-        connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())),
-        timeout=aiohttp.ClientTimeout(
-            total=15 * 60,
-            connect=10,
-            sock_connect=10,
-            sock_read=10,
-        )
-    )
-
-
-async def check_for_updates() -> None:
-    if new_version := await get_newer_version():
-        log.warn(
-            f"{NAME} version out of date. "
-            + f"You are running version {VERSION!r} but {new_version.version!r} was found on GitHub."
-        )
-        log.warn_contd(f"You can download it on GitHub: {new_version.release_url}")
-    else:
-        log.explain("No update found")
-
-
-async def get_newer_version() -> Optional[PferdUpdate]:
-    async with _build_session() as session:
-        async with session.get(
-            "https://api.github.com/repos/Garmelon/Pferd/releases/latest",
-            headers={"Accept": "application/vnd.github+json"}
-        ) as response:
-            release_information = await response.json()
-    tag_name: str = release_information["tag_name"]
-    tag_name = tag_name.removeprefix("v")
-    if VERSION == tag_name:
-        return None
-
-    return PferdUpdate(release_url=release_information["html_url"], version=tag_name)

@@ -92,17 +92,32 @@ def url_set_query_params(url: str, params: Dict[str, str]) -> str:


 def str_path(path: PurePath) -> str:
+    """
+    Turn a path into a string, in a platform-independent way.
+
+    This function always uses "/" as path separator, even on Windows.
+    """
     if not path.parts:
         return "."
     return "/".join(path.parts)


 def fmt_path(path: PurePath) -> str:
+    """
+    Turn a path into a delimited string.
+
+    This is useful if file or directory names contain weird characters like
+    newlines, leading/trailing whitespace or unprintable characters. This way,
+    they are escaped and visible to the user.
+    """
     return repr(str_path(path))


 def fmt_real_path(path: Path) -> str:
-    return repr(str(path.absolute()))
+    """
+    Like fmt_path, but resolves the path before converting it to a string.
+    """
+    return fmt_path(path.absolute())


 class ReusableAsyncContextManager(ABC, Generic[T]):

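For reference, a short runnable sketch of the two path helpers whose docstrings are added above (function bodies copied from the diff; the example values are illustrative):

    from pathlib import PurePath

    def str_path(path: PurePath) -> str:
        # Platform-independent: always joins the parts with "/".
        if not path.parts:
            return "."
        return "/".join(path.parts)

    def fmt_path(path: PurePath) -> str:
        # repr() escapes newlines, whitespace and unprintable characters.
        return repr(str_path(path))

    print(str_path(PurePath("foo") / "bar"))        # foo/bar (also on Windows)
    print(str_path(PurePath(".")))                  # .
    print(fmt_path(PurePath("foo") / "bar baz\n"))  # 'foo/bar baz\n'
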
@@ -1,2 +1,2 @@
 NAME = "PFERD"
-VERSION = "3.4.1"
+VERSION = "3.4.3"

@@ -30,7 +30,10 @@ The use of [venv](https://docs.python.org/3/library/venv.html) is recommended.

 Unofficial packages are available for:
 - [AUR](https://aur.archlinux.org/packages/pferd)
+- [brew](https://formulae.brew.sh/formula/pferd)
+- [conda-forge](https://github.com/conda-forge/pferd-feedstock)
 - [nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/pferd/default.nix)
+- [PyPi](https://pypi.org/project/pferd)

 See also PFERD's [repology page](https://repology.org/project/pferd/versions).

@@ -14,4 +14,4 @@ pip install --editable .

 # Installing tools and type hints
 pip install --upgrade mypy flake8 autopep8 isort pyinstaller
-pip install --upgrade types-chardet types-certifi
+mypy PFERD --install-types --non-interactive