Compare commits

...

14 Commits

SHA1 Message Date
bf27f4a686 TODO 2023-04-19 10:13:36 +02:00
5adfdfbd2b Switch http_crawler to requests 2023-04-19 10:12:48 +02:00
5c3942a13d Fix flake8 error 2023-04-19 10:12:48 +02:00
5c9209b12e Document path formatting functions 2023-04-19 10:12:48 +02:00
50c7778d38 Use mypy to install library stub packages 2023-04-19 10:12:48 +02:00
354a22d1e3 Add vscode settings 2023-04-19 10:12:48 +02:00
6f87c5c774 Make ipd crawler synchronous 2023-04-19 10:12:48 +02:00
1ca10571f0 Remove limiter 2023-04-19 10:12:48 +02:00
10e1a5e871 De-Async ilias crawler 2023-04-19 10:12:48 +02:00
a2ffce4702 Make local crawler synchronous 2023-04-19 10:12:48 +02:00
0294ceb7d5 Update github action versions 2023-03-22 00:10:54 +01:00
6f30c6583d Fix crawling of cards without descriptions 2023-03-21 23:52:33 +01:00
467fc526e8 Fix crawling of file/video cards 2023-03-21 23:52:24 +01:00
722d2eb393 Fix crawling of courses with preselected timeline tab 2023-03-21 23:36:47 +01:00
14 changed files with 158 additions and 322 deletions


@@ -17,9 +17,9 @@ jobs:
python: ["3.9"]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-python@v2
- uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
@@ -45,7 +45,7 @@ jobs:
run: mv dist/pferd* dist/pferd-${{ matrix.os }}
- name: Upload binary
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: Binaries
path: dist/pferd-${{ matrix.os }}
@@ -57,7 +57,7 @@ jobs:
steps:
- name: Download binaries
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: Binaries

.gitignore vendored (1 change)

@@ -2,7 +2,6 @@
/.venv/
/PFERD.egg-info/
__pycache__/
/.vscode/
# pyinstaller
/pferd.spec

.vscode/settings.json vendored, new file (8 additions)

@@ -0,0 +1,8 @@
{
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true,
"python.formatting.provider": "autopep8",
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.linting.mypyEnabled": true,
}


@@ -22,6 +22,11 @@ ambiguous situations.
## Unreleased
### Fixed
- Crawling of courses with the timeline view as the default tab
- Crawling of file and custom opencast cards
- Crawling of button cards without descriptions
## 3.4.3 - 2022-11-29
### Added


@@ -9,7 +9,6 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Ty
from ..auth import Authenticator
from ..config import Config, Section
from ..deduplicator import Deduplicator
from ..limiter import Limiter
from ..logging import ProgressBar, log
from ..output_dir import FileSink, FileSinkToken, OnConflict, OutputDirectory, OutputDirError, Redownload
from ..report import MarkConflictError, MarkDuplicateError, Report
@@ -98,10 +97,9 @@ def anoncritical(f: AWrapped) -> AWrapped:
class CrawlToken(ReusableAsyncContextManager[ProgressBar]):
def __init__(self, limiter: Limiter, path: PurePath):
def __init__(self, path: PurePath):
super().__init__()
self._limiter = limiter
self._path = path
@property
@@ -110,17 +108,15 @@ class CrawlToken(ReusableAsyncContextManager[ProgressBar]):
async def _on_aenter(self) -> ProgressBar:
self._stack.callback(lambda: log.status("[bold cyan]", "Crawled", fmt_path(self._path)))
await self._stack.enter_async_context(self._limiter.limit_crawl())
bar = self._stack.enter_context(log.crawl_bar("[bold bright_cyan]", "Crawling", fmt_path(self._path)))
return bar
class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
def __init__(self, limiter: Limiter, fs_token: FileSinkToken, path: PurePath):
def __init__(self, fs_token: FileSinkToken, path: PurePath):
super().__init__()
self._limiter = limiter
self._fs_token = fs_token
self._path = path
@@ -129,7 +125,6 @@ class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
return self._path
async def _on_aenter(self) -> Tuple[ProgressBar, FileSink]:
await self._stack.enter_async_context(self._limiter.limit_download())
sink = await self._stack.enter_async_context(self._fs_token)
# The "Downloaded ..." message is printed in the output dir, not here
bar = self._stack.enter_context(log.download_bar("[bold bright_cyan]", "Downloading",
@@ -235,12 +230,6 @@ class Crawler(ABC):
self.name = name
self.error_free = True
self._limiter = Limiter(
task_limit=section.tasks(),
download_limit=section.downloads(),
task_delay=section.task_delay(),
)
self._deduplicator = Deduplicator(section.windows_paths())
self._transformer = Transformer(section.transform())
@@ -288,7 +277,7 @@
return None
log.explain("Answer: Yes")
return CrawlToken(self._limiter, path)
return CrawlToken(path)
async def download(
self,
@@ -313,7 +302,7 @@
return None
log.explain("Answer: Yes")
return DownloadToken(self._limiter, fs_token, path)
return DownloadToken(fs_token, path)
async def _cleanup(self) -> None:
log.explain_topic("Decision: Clean up files")


@@ -1,12 +1,9 @@
import asyncio
import http.cookies
import ssl
from http.cookiejar import LWPCookieJar
from pathlib import Path, PurePath
from typing import Any, Dict, List, Optional
from typing import Dict, List, Optional
import aiohttp
import certifi
from aiohttp.client import ClientTimeout
import requests
from ..auth import Authenticator
from ..config import Config
@@ -35,9 +32,9 @@ class HttpCrawler(Crawler):
self._authentication_id = 0
self._authentication_lock = asyncio.Lock()
self._request_count = 0
self._http_timeout = section.http_timeout()
self._http_timeout = section.http_timeout() # TODO Use or remove
self._cookie_jar = LWPCookieJar()
self._cookie_jar_path = self._output_dir.resolve(self.COOKIE_FILE)
self._shared_cookie_jar_paths: Optional[List[Path]] = None
self._shared_auth = shared_auth
@@ -57,7 +54,6 @@
# This should reduce the amount of requests we make: If an authentication is in progress
# all future requests wait for authentication to complete.
async with self._authentication_lock:
self._request_count += 1
return self._authentication_id
async def authenticate(self, caller_auth_id: int) -> None:
@@ -106,32 +102,13 @@
self._shared_cookie_jar_paths.append(self._cookie_jar_path)
def _load_cookies_from_file(self, path: Path) -> None:
jar: Any = http.cookies.SimpleCookie()
with open(path, encoding="utf-8") as f:
for i, line in enumerate(f):
# Names of headers are case insensitive
if line[:11].lower() == "set-cookie:":
jar.load(line[11:])
else:
log.explain(f"Line {i} doesn't start with 'Set-Cookie:', ignoring it")
self._cookie_jar.update_cookies(jar)
def _save_cookies_to_file(self, path: Path) -> None:
jar: Any = http.cookies.SimpleCookie()
for morsel in self._cookie_jar:
jar[morsel.key] = morsel
with open(path, "w", encoding="utf-8") as f:
f.write(jar.output(sep="\n"))
f.write("\n") # A trailing newline is just common courtesy
def _load_cookies(self) -> None:
log.explain_topic("Loading cookies")
cookie_jar_path: Optional[Path] = None
if self._shared_cookie_jar_paths is None:
log.explain("Not sharing any cookies")
log.explain("Not sharing cookies")
cookie_jar_path = self._cookie_jar_path
else:
log.explain("Sharing cookies")
@@ -154,46 +131,38 @@
log.explain(f"Loading cookies from {fmt_real_path(cookie_jar_path)}")
try:
self._load_cookies_from_file(cookie_jar_path)
self._cookie_jar.load(filename=str(cookie_jar_path))
except Exception as e:
log.explain("Failed to load cookies")
log.explain(str(e))
log.explain(f"Failed to load cookies: {e}")
log.explain("Proceeding without cookies")
def _save_cookies(self) -> None:
log.explain_topic("Saving cookies")
try:
log.explain(f"Saving cookies to {fmt_real_path(self._cookie_jar_path)}")
self._save_cookies_to_file(self._cookie_jar_path)
self._cookie_jar.save(filename=str(self._cookie_jar_path))
except Exception as e:
log.warn(f"Failed to save cookies to {fmt_real_path(self._cookie_jar_path)}")
log.warn(str(e))
log.warn(f"Failed to save cookies: {e}")
async def run(self) -> None:
self._request_count = 0
self._cookie_jar = aiohttp.CookieJar()
self._load_cookies()
async with aiohttp.ClientSession(
headers={"User-Agent": f"{NAME}/{VERSION}"},
cookie_jar=self._cookie_jar,
connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())),
timeout=ClientTimeout(
# 30 minutes. No download in the history of downloads was longer than 30 minutes.
# This is enough to transfer a 600 MB file over a 3 Mib/s connection.
# Allowing an arbitrary value could be annoying for overnight batch jobs
total=15 * 60,
connect=self._http_timeout,
sock_connect=self._http_timeout,
sock_read=self._http_timeout,
)
) as session:
self.session = session
self.session = requests.Session()
self.session.headers["User-Agent"] = f"{NAME}/{VERSION}"
# From the request docs: "All requests code should work out of the box
# with externally provided instances of CookieJar, e.g. LWPCookieJar and
# FileCookieJar."
# https://requests.readthedocs.io/en/latest/api/#requests.cookies.RequestsCookieJar
self.session.cookies = self._cookie_jar # type: ignore
with self.session:
try:
await super().run()
finally:
del self.session
log.explain_topic(f"Total amount of HTTP requests: {self._request_count}")
# They are saved in authenticate, but a final save won't hurt
self._save_cookies()
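
As an aside, the requests-plus-LWPCookieJar combination adopted above is easy to exercise in isolation. A minimal sketch (the file name and URL are placeholders, not values from this repository):

```python
from http.cookiejar import LWPCookieJar

import requests

jar = LWPCookieJar()
try:
    # Same call as _load_cookies above; fails harmlessly on the first run.
    jar.load(filename="cookies.txt")
except OSError:
    pass

session = requests.Session()
session.headers["User-Agent"] = "pferd-example/0.0"
# requests accepts externally provided cookielib jars (see the quoted docs).
session.cookies = jar  # type: ignore[assignment]

session.get("https://example.com")
jar.save(filename="cookies.txt")
```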


@@ -158,6 +158,8 @@ class IliasPage:
if self._contains_collapsed_future_meetings():
log.explain("Requesting *all* future meetings")
return self._uncollapse_future_meetings_url()
if not self._is_content_tab_selected():
return self._select_content_page_url()
return None
def _is_forum_page(self) -> bool:
@@ -220,6 +222,27 @@
link = self._abs_url_from_link(element)
return IliasPageElement(IliasElementType.FOLDER, link, "show all meetings")
def _is_content_tab_selected(self) -> bool:
return self._select_content_page_url() is None
def _select_content_page_url(self) -> Optional[IliasPageElement]:
tab = self._soup.find(
id="tab_view_content",
attrs={"class": lambda x: x is not None and "active" not in x}
)
# Already selected (or not found)
if not tab:
return None
link = tab.find("a")
if link:
link = self._abs_url_from_link(link)
return IliasPageElement(IliasElementType.FOLDER, link, "select content page")
_unexpected_html_warning()
log.warn_contd(f"Could not find content tab URL on {self._page_url!r}.")
log.warn_contd("PFERD might not find content on the course's main page.")
return None
def _player_to_video(self) -> List[IliasPageElement]:
# Fetch the actual video page. This is a small wrapper page initializing a javscript
# player. Sadly we can not execute that JS. The actual video stream url is nowhere
@@ -685,7 +708,11 @@
"div",
attrs={"class": lambda x: x and "caption" in x},
)
description = caption_parent.find_next_sibling("div").getText().strip()
caption_container = caption_parent.find_next_sibling("div")
if caption_container:
description = caption_container.getText().strip()
else:
description = None
if not type:
_unexpected_html_warning()
@@ -715,7 +742,7 @@
icon: Tag = card_root.select_one(".il-card-repository-head .icon")
if "opencast" in icon["class"]:
if "opencast" in icon["class"] or "xoct" in icon["class"]:
return IliasElementType.VIDEO_FOLDER_MAYBE_PAGINATED
if "exc" in icon["class"]:
return IliasElementType.EXERCISE
@@ -735,6 +762,8 @@
return IliasElementType.FOLDER
if "svy" in icon["class"]:
return IliasElementType.SURVEY
if "file" in icon["class"]:
return IliasElementType.FILE
_unexpected_html_warning()
log.warn_contd(f"Could not extract type from {icon} for card title {card_title}")


@@ -126,13 +126,6 @@ def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Calla
return decorator
def _wrap_io_in_warning(name: str) -> Callable[[AWrapped], AWrapped]:
"""
Wraps any I/O exception in a CrawlWarning.
"""
return _iorepeat(1, name)
# Crawler control flow:
#
# crawl_desktop -+
@@ -226,80 +219,22 @@ instance's greatest bottleneck.
return
cl = maybe_cl # Not mypy's fault, but explained here: https://github.com/python/mypy/issues/2608
elements: List[IliasPageElement] = []
# A list as variable redefinitions are not propagated to outer scopes
description: List[BeautifulSoup] = []
@_iorepeat(3, "crawling url")
async def gather_elements() -> None:
elements.clear()
async with cl:
next_stage_url: Optional[str] = url
current_parent = None
# Duplicated code, but the root page is special - we want to avoid fetching it twice!
while next_stage_url:
soup = await self._get_page(next_stage_url)
if current_parent is None and expected_id is not None:
def ensure_is_valid_course_id(parent: Optional[IliasPageElement], soup: BeautifulSoup) -> None:
if parent is None and expected_id is not None:
perma_link_element: Tag = soup.find(id="current_perma_link")
if not perma_link_element or "crs_" not in perma_link_element.get("value"):
raise CrawlError("Invalid course id? Didn't find anything looking like a course")
log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
log.explain(f"URL: {next_stage_url}")
page = IliasPage(soup, next_stage_url, current_parent)
if next_element := page.get_next_stage_element():
current_parent = next_element
next_stage_url = next_element.url
else:
next_stage_url = None
elements.extend(page.get_child_elements())
if description_string := page.get_description():
description.append(description_string)
# Fill up our task list with the found elements
await gather_elements()
if description:
await self._download_description(PurePath("."), description[0])
elements.sort(key=lambda e: e.id())
tasks: List[Awaitable[None]] = []
for element in elements:
if handle := await self._handle_ilias_element(PurePath("."), element):
tasks.append(asyncio.create_task(handle))
# And execute them
await self.gather(tasks)
async def _handle_ilias_page(
self,
url: str,
parent: IliasPageElement,
path: PurePath,
) -> Optional[Coroutine[Any, Any, None]]:
maybe_cl = await self.crawl(path)
if not maybe_cl:
return None
return self._crawl_ilias_page(url, parent, maybe_cl)
await self._crawl_ilias_page(url, None, cl, ensure_is_valid_course_id)
@anoncritical
async def _crawl_ilias_page(
self,
url: str,
parent: IliasPageElement,
parent: Optional[IliasPageElement],
cl: CrawlToken,
next_stage_hook: Callable[[Optional[IliasPageElement], BeautifulSoup], None] = lambda a, b: None
) -> None:
elements: List[IliasPageElement] = []
# A list as variable redefinitions are not propagated to outer scopes
description: List[BeautifulSoup] = []
@_iorepeat(3, "crawling folder")
async def gather_elements() -> None:
elements.clear()
async with cl:
next_stage_url: Optional[str] = url
current_parent = parent
@@ -308,6 +243,9 @@ instance's greatest bottleneck.
soup = await self._get_page(next_stage_url)
log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
log.explain(f"URL: {next_stage_url}")
next_stage_hook(current_parent, soup)
page = IliasPage(soup, next_stage_url, current_parent)
if next_element := page.get_next_stage_element():
current_parent = next_element
@@ -315,25 +253,11 @@ instance's greatest bottleneck.
else:
next_stage_url = None
elements.extend(page.get_child_elements())
for element in sorted(page.get_child_elements(), key=lambda e: e.id()):
await self._handle_ilias_element(cl.path, element)
if description_string := page.get_description():
description.append(description_string)
# Fill up our task list with the found elements
await gather_elements()
if description:
await self._download_description(cl.path, description[0])
elements.sort(key=lambda e: e.id())
tasks: List[Awaitable[None]] = []
for element in elements:
if handle := await self._handle_ilias_element(cl.path, element):
tasks.append(asyncio.create_task(handle))
# And execute them
await self.gather(tasks)
await self._download_description(cl.path, description_string)
# These decorators only apply *to this method* and *NOT* to the returned
# awaitables!
@@ -345,7 +269,7 @@ instance's greatest bottleneck.
self,
parent_path: PurePath,
element: IliasPageElement,
) -> Optional[Coroutine[Any, Any, None]]:
) -> None:
if element.url in self._visited_urls:
raise CrawlWarning(
f"Found second path to element {element.name!r} at {element.url!r}. "
@@ -367,7 +291,7 @@ instance's greatest bottleneck.
return None
if element.type == IliasElementType.FILE:
return await self._handle_file(element, element_path)
await self._handle_file(element, element_path)
elif element.type == IliasElementType.FORUM:
if not self._forums:
log.status(
@@ -377,7 +301,7 @@ instance's greatest bottleneck.
"[bright_black](enable with option 'forums')"
)
return None
return await self._handle_forum(element, element_path)
await self._handle_forum(element, element_path)
elif element.type == IliasElementType.TEST:
log.status(
"[bold bright_black]",
@@ -395,15 +319,18 @@ instance's greatest bottleneck.
)
return None
elif element.type == IliasElementType.LINK:
return await self._handle_link(element, element_path)
await self._handle_link(element, element_path)
elif element.type == IliasElementType.BOOKING:
return await self._handle_booking(element, element_path)
await self._handle_booking(element, element_path)
elif element.type == IliasElementType.VIDEO:
return await self._handle_file(element, element_path)
await self._handle_file(element, element_path)
elif element.type == IliasElementType.VIDEO_PLAYER:
return await self._handle_video(element, element_path)
await self._handle_video(element, element_path)
elif element.type in _DIRECTORY_PAGES:
return await self._handle_ilias_page(element.url, element, element_path)
maybe_cl = await self.crawl(element_path)
if not maybe_cl:
return None
await self._crawl_ilias_page(element.url, element, maybe_cl)
else:
# This will retry it a few times, failing everytime. It doesn't make any network
# requests, so that's fine.
@@ -413,7 +340,7 @@ instance's greatest bottleneck.
self,
element: IliasPageElement,
element_path: PurePath,
) -> Optional[Coroutine[Any, Any, None]]:
) -> None:
log.explain_topic(f"Decision: Crawl Link {fmt_path(element_path)}")
log.explain(f"Links type is {self._links}")
@@ -430,7 +357,7 @@ instance's greatest bottleneck.
if not maybe_dl:
return None
return self._download_link(element, link_template_maybe, maybe_dl)
await self._download_link(element, link_template_maybe, maybe_dl)
@anoncritical
@_iorepeat(3, "resolving link")
@@ -522,7 +449,7 @@ instance's greatest bottleneck.
self,
element: IliasPageElement,
element_path: PurePath,
) -> Optional[Coroutine[Any, Any, None]]:
) -> None:
# Copy old mapping as it is likely still relevant
if self.prev_report:
self.report.add_custom_value(
@@ -548,7 +475,7 @@ instance's greatest bottleneck.
return None
return self._download_video(element_path, element, maybe_dl)
await self._download_video(element_path, element, maybe_dl)
def _previous_contained_videos(self, video_path: PurePath) -> List[PurePath]:
if not self.prev_report:
@@ -630,11 +557,11 @@ instance's greatest bottleneck.
self,
element: IliasPageElement,
element_path: PurePath,
) -> Optional[Coroutine[Any, Any, None]]:
) -> None:
maybe_dl = await self.download(element_path, mtime=element.mtime)
if not maybe_dl:
return None
return self._download_file(element, maybe_dl)
await self._download_file(element, maybe_dl)
@anoncritical
@_iorepeat(3, "downloading file")
@@ -677,11 +604,11 @@ instance's greatest bottleneck.
self,
element: IliasPageElement,
element_path: PurePath,
) -> Optional[Coroutine[Any, Any, None]]:
) -> None:
maybe_cl = await self.crawl(element_path)
if not maybe_cl:
return None
return self._crawl_forum(element, maybe_cl)
await self._crawl_forum(element, maybe_cl)
@_iorepeat(3, "crawling forum")
@anoncritical
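
For readers unfamiliar with the @_iorepeat decorators used throughout this file: they retry an async operation a few times before giving up. A rough, self-contained sketch of the idea (not PFERD's actual implementation; the exception handling is simplified):

```python
import asyncio
from functools import wraps
from typing import Any, Callable, Optional, TypeVar

F = TypeVar("F", bound=Callable[..., Any])


def iorepeat(attempts: int, name: str) -> Callable[[F], F]:
    def decorator(func: F) -> F:
        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            last_exc: Optional[Exception] = None
            for attempt in range(attempts):
                try:
                    return await func(*args, **kwargs)
                except OSError as e:  # the real code catches crawler I/O errors
                    last_exc = e
                    print(f"{name}: attempt {attempt + 1} failed: {e}")
            raise RuntimeError(f"{name} failed after {attempts} attempts") from last_exc
        return wrapper  # type: ignore[return-value]
    return decorator


@iorepeat(3, "crawling folder")
async def flaky() -> None:
    raise OSError("connection reset")


try:
    asyncio.run(flaky())
except RuntimeError as e:
    print(e)
```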


@@ -2,7 +2,7 @@ import os
import re
from dataclasses import dataclass
from pathlib import PurePath
from typing import Awaitable, List, Optional, Pattern, Set, Tuple, Union
from typing import List, Optional, Pattern, Set, Tuple, Union
from urllib.parse import urljoin
from bs4 import BeautifulSoup, Tag
@@ -64,42 +64,37 @@ class KitIpdCrawler(HttpCrawler):
self._file_regex = section.link_regex()
async def _run(self) -> None:
maybe_cl = await self.crawl(PurePath("."))
if not maybe_cl:
cl = await self.crawl(PurePath("."))
if not cl:
return
tasks: List[Awaitable[None]] = []
async with maybe_cl:
async with cl:
for item in await self._fetch_items():
if isinstance(item, KitIpdFolder):
tasks.append(self._crawl_folder(item))
await self._crawl_folder(item)
else:
# Orphan files are placed in the root folder
tasks.append(self._download_file(PurePath("."), item))
await self.gather(tasks)
await self._download_file(PurePath("."), item)
async def _crawl_folder(self, folder: KitIpdFolder) -> None:
path = PurePath(folder.name)
if not await self.crawl(path):
return
tasks = [self._download_file(path, file) for file in folder.files]
await self.gather(tasks)
for file in folder.files:
await self._download_file(path, file)
async def _download_file(self, parent: PurePath, file: KitIpdFile) -> None:
element_path = parent / file.name
maybe_dl = await self.download(element_path)
if not maybe_dl:
dl = await self.download(element_path)
if not dl:
return
async with maybe_dl as (bar, sink):
async with dl as (bar, sink):
await self._stream_from_url(file.url, sink, bar)
async def _fetch_items(self) -> Set[Union[KitIpdFile, KitIpdFolder]]:
page, url = await self.get_page()
page, url = await self._get_page()
elements: List[Tag] = self._find_file_links(page)
items: Set[Union[KitIpdFile, KitIpdFolder]] = set()
@@ -159,12 +154,12 @@ class KitIpdCrawler(HttpCrawler):
sink.done()
async def get_page(self) -> Tuple[BeautifulSoup, str]:
async with self.session.get(self._url) as request:
async def _get_page(self) -> Tuple[BeautifulSoup, str]:
response = self.session.get(self._url)
# The web page for Algorithmen für Routenplanung contains some
# weird comments that beautifulsoup doesn't parse correctly. This
# hack enables those pages to be crawled, and should hopefully not
# cause issues on other pages.
content = (await request.read()).decode("utf-8")
content = re.sub(r"<!--.*?-->", "", content)
content = re.sub(r"<!--.*?-->", "", response.text)
return soupify(content.encode("utf-8")), str(request.url)
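
The regex above strips HTML comments before parsing; a stand-alone illustration of that preprocessing step (the sample markup is invented):

```python
import re

from bs4 import BeautifulSoup

raw = '<p>intro</p><!--[if IE]> weird conditional markup <![endif]--><a href="slides.pdf">Slides</a>'
cleaned = re.sub(r"<!--.*?-->", "", raw)
soup = BeautifulSoup(cleaned, "html.parser")
print([a["href"] for a in soup.find_all("a")])  # -> ['slides.pdf']
```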


@@ -71,8 +71,6 @@ class LocalCrawler(Crawler):
if not cl:
return
tasks = []
async with cl:
await asyncio.sleep(random.uniform(
0.5 * self._crawl_delay,
@@ -81,9 +79,7 @@ class LocalCrawler(Crawler):
for child in path.iterdir():
pure_child = cl.path / child.name
tasks.append(self._crawl_path(child, pure_child))
await self.gather(tasks)
await self._crawl_path(child, pure_child)
async def _crawl_file(self, path: Path, pure: PurePath) -> None:
stat = path.stat()
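
The local crawler keeps its randomized per-entry delay; in isolation that boils down to something like the following sketch (the 0.5x/1.5x bounds are an assumption, since the upper bound is cut off in this hunk):

```python
import asyncio
import random


async def crawl_entries(names: list, crawl_delay: float = 0.2) -> None:
    for name in names:
        # Sleep a jittered amount around the configured delay
        # (bounds are illustrative, not necessarily PFERD's).
        await asyncio.sleep(random.uniform(0.5 * crawl_delay, 1.5 * crawl_delay))
        print("crawled", name)


asyncio.run(crawl_entries(["notes.pdf", "sheet01.pdf"]))
```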


@@ -1,97 +0,0 @@
import asyncio
import time
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import AsyncIterator, Optional
@dataclass
class Slot:
active: bool = False
last_left: Optional[float] = None
class Limiter:
def __init__(
self,
task_limit: int,
download_limit: int,
task_delay: float
):
if task_limit <= 0:
raise ValueError("task limit must be at least 1")
if download_limit <= 0:
raise ValueError("download limit must be at least 1")
if download_limit > task_limit:
raise ValueError("download limit can't be greater than task limit")
if task_delay < 0:
raise ValueError("Task delay must not be negative")
self._slots = [Slot() for _ in range(task_limit)]
self._downloads = download_limit
self._delay = task_delay
self._condition = asyncio.Condition()
def _acquire_slot(self) -> Optional[Slot]:
for slot in self._slots:
if not slot.active:
slot.active = True
return slot
return None
async def _wait_for_slot_delay(self, slot: Slot) -> None:
if slot.last_left is not None:
delay = slot.last_left + self._delay - time.time()
if delay > 0:
await asyncio.sleep(delay)
def _release_slot(self, slot: Slot) -> None:
slot.last_left = time.time()
slot.active = False
@asynccontextmanager
async def limit_crawl(self) -> AsyncIterator[None]:
slot: Slot
async with self._condition:
while True:
if found_slot := self._acquire_slot():
slot = found_slot
break
await self._condition.wait()
await self._wait_for_slot_delay(slot)
try:
yield
finally:
async with self._condition:
self._release_slot(slot)
self._condition.notify_all()
@asynccontextmanager
async def limit_download(self) -> AsyncIterator[None]:
slot: Slot
async with self._condition:
while True:
if self._downloads <= 0:
await self._condition.wait()
continue
if found_slot := self._acquire_slot():
slot = found_slot
self._downloads -= 1
break
await self._condition.wait()
await self._wait_for_slot_delay(slot)
try:
yield
finally:
async with self._condition:
self._release_slot(slot)
self._downloads += 1
self._condition.notify_all()
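
With the crawlers now synchronous, the Limiter above is removed entirely. Its core job, bounded concurrency with a small delay per task, can be approximated with a plain semaphore; a minimal sketch of that idea (not the removed implementation):

```python
import asyncio


async def limited(sem: asyncio.Semaphore, delay: float, name: str) -> None:
    async with sem:  # at most task_limit tasks run at once
        print("working on", name)
        await asyncio.sleep(delay)  # crude stand-in for the per-slot task_delay


async def main() -> None:
    sem = asyncio.Semaphore(2)  # task_limit = 2
    await asyncio.gather(*(limited(sem, 0.1, f"task-{i}") for i in range(5)))


asyncio.run(main())
```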


@@ -92,17 +92,32 @@ def url_set_query_params(url: str, params: Dict[str, str]) -> str:
def str_path(path: PurePath) -> str:
"""
Turn a path into a string, in a platform-independent way.
This function always uses "/" as path separator, even on Windows.
"""
if not path.parts:
return "."
return "/".join(path.parts)
def fmt_path(path: PurePath) -> str:
"""
Turn a path into a delimited string.
This is useful if file or directory names contain weird characters like
newlines, leading/trailing whitespace or unprintable characters. This way,
they are escaped and visible to the user.
"""
return repr(str_path(path))
def fmt_real_path(path: Path) -> str:
return repr(str(path.absolute()))
"""
Like fmt_path, but resolves the path before converting it to a string.
"""
return fmt_path(path.absolute())
class ReusableAsyncContextManager(ABC, Generic[T]):
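
A quick usage example for the newly documented helpers (the functions are repeated here so the snippet runs on its own):

```python
from pathlib import PurePath


def str_path(path: PurePath) -> str:
    if not path.parts:
        return "."
    return "/".join(path.parts)


def fmt_path(path: PurePath) -> str:
    return repr(str_path(path))


print(str_path(PurePath()))                   # -> .
print(str_path(PurePath("a", "b c")))         # -> a/b c
print(fmt_path(PurePath("weird\nname.txt")))  # -> 'weird\nname.txt'
```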


@@ -14,4 +14,4 @@ pip install --editable .
# Installing tools and type hints
pip install --upgrade mypy flake8 autopep8 isort pyinstaller
pip install --upgrade types-chardet types-certifi
mypy PFERD --install-types --non-interactive


@@ -11,6 +11,7 @@ install_requires =
rich>=11.0.0
keyring>=23.5.0
certifi>=2021.10.8
requests>=2.28.1
[options.entry_points]
console_scripts =