Compare commits

...

10 Commits

Author SHA1 Message Date
bf27f4a686 TODO 2023-04-19 10:13:36 +02:00
5adfdfbd2b Switch http_crawler to requests 2023-04-19 10:12:48 +02:00
5c3942a13d Fix flake8 error 2023-04-19 10:12:48 +02:00
5c9209b12e Document path formatting functions 2023-04-19 10:12:48 +02:00
50c7778d38 Use mypy to install library stub packages 2023-04-19 10:12:48 +02:00
354a22d1e3 Add vscode settings 2023-04-19 10:12:48 +02:00
6f87c5c774 Make ipd crawler synchronous 2023-04-19 10:12:48 +02:00
1ca10571f0 Remove limiter 2023-04-19 10:12:48 +02:00
10e1a5e871 De-Async ilias crawler 2023-04-19 10:12:48 +02:00
a2ffce4702 Make local crawler synchronous 2023-04-19 10:12:48 +02:00
11 changed files with 118 additions and 316 deletions

.gitignore (1 change)
View File

@@ -2,7 +2,6 @@
 /.venv/
 /PFERD.egg-info/
 __pycache__/
-/.vscode/
 
 # pyinstaller
 /pferd.spec

.vscode/settings.json (new file, 8 additions)
View File

@@ -0,0 +1,8 @@
+{
+    "files.insertFinalNewline": true,
+    "files.trimFinalNewlines": true,
+    "python.formatting.provider": "autopep8",
+    "python.linting.enabled": true,
+    "python.linting.flake8Enabled": true,
+    "python.linting.mypyEnabled": true,
+}

View File

@@ -9,7 +9,6 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Ty
 from ..auth import Authenticator
 from ..config import Config, Section
 from ..deduplicator import Deduplicator
-from ..limiter import Limiter
 from ..logging import ProgressBar, log
 from ..output_dir import FileSink, FileSinkToken, OnConflict, OutputDirectory, OutputDirError, Redownload
 from ..report import MarkConflictError, MarkDuplicateError, Report
@@ -98,10 +97,9 @@ def anoncritical(f: AWrapped) -> AWrapped:
 class CrawlToken(ReusableAsyncContextManager[ProgressBar]):
-    def __init__(self, limiter: Limiter, path: PurePath):
+    def __init__(self, path: PurePath):
         super().__init__()
 
-        self._limiter = limiter
         self._path = path
 
     @property
@@ -110,17 +108,15 @@ class CrawlToken(ReusableAsyncContextManager[ProgressBar]):
     async def _on_aenter(self) -> ProgressBar:
         self._stack.callback(lambda: log.status("[bold cyan]", "Crawled", fmt_path(self._path)))
-        await self._stack.enter_async_context(self._limiter.limit_crawl())
         bar = self._stack.enter_context(log.crawl_bar("[bold bright_cyan]", "Crawling", fmt_path(self._path)))
 
         return bar
 
 
 class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
-    def __init__(self, limiter: Limiter, fs_token: FileSinkToken, path: PurePath):
+    def __init__(self, fs_token: FileSinkToken, path: PurePath):
         super().__init__()
 
-        self._limiter = limiter
         self._fs_token = fs_token
         self._path = path
@@ -129,7 +125,6 @@ class DownloadToken(ReusableAsyncContextManager[Tuple[ProgressBar, FileSink]]):
         return self._path
 
     async def _on_aenter(self) -> Tuple[ProgressBar, FileSink]:
-        await self._stack.enter_async_context(self._limiter.limit_download())
         sink = await self._stack.enter_async_context(self._fs_token)
         # The "Downloaded ..." message is printed in the output dir, not here
         bar = self._stack.enter_context(log.download_bar("[bold bright_cyan]", "Downloading",
@@ -235,12 +230,6 @@ class Crawler(ABC):
         self.name = name
         self.error_free = True
 
-        self._limiter = Limiter(
-            task_limit=section.tasks(),
-            download_limit=section.downloads(),
-            task_delay=section.task_delay(),
-        )
-
         self._deduplicator = Deduplicator(section.windows_paths())
         self._transformer = Transformer(section.transform())
@@ -288,7 +277,7 @@ class Crawler(ABC):
             return None
         log.explain("Answer: Yes")
 
-        return CrawlToken(self._limiter, path)
+        return CrawlToken(path)
 
     async def download(
         self,
@@ -313,7 +302,7 @@ class Crawler(ABC):
             return None
         log.explain("Answer: Yes")
 
-        return DownloadToken(self._limiter, fs_token, path)
+        return DownloadToken(fs_token, path)
 
     async def _cleanup(self) -> None:
         log.explain_topic("Decision: Clean up files")

View File

@@ -1,12 +1,9 @@
 import asyncio
-import http.cookies
-import ssl
+from http.cookiejar import LWPCookieJar
 from pathlib import Path, PurePath
-from typing import Any, Dict, List, Optional
+from typing import Dict, List, Optional
 
-import aiohttp
-import certifi
-from aiohttp.client import ClientTimeout
+import requests
 
 from ..auth import Authenticator
 from ..config import Config
@@ -35,9 +32,9 @@ class HttpCrawler(Crawler):
         self._authentication_id = 0
         self._authentication_lock = asyncio.Lock()
-        self._request_count = 0
-        self._http_timeout = section.http_timeout()
+        self._http_timeout = section.http_timeout()  # TODO Use or remove
 
+        self._cookie_jar = LWPCookieJar()
         self._cookie_jar_path = self._output_dir.resolve(self.COOKIE_FILE)
         self._shared_cookie_jar_paths: Optional[List[Path]] = None
         self._shared_auth = shared_auth
@@ -57,7 +54,6 @@ class HttpCrawler(Crawler):
         # This should reduce the amount of requests we make: If an authentication is in progress
         # all future requests wait for authentication to complete.
         async with self._authentication_lock:
-            self._request_count += 1
             return self._authentication_id
 
     async def authenticate(self, caller_auth_id: int) -> None:
@@ -106,32 +102,13 @@ class HttpCrawler(Crawler):
             self._shared_cookie_jar_paths.append(self._cookie_jar_path)
 
-    def _load_cookies_from_file(self, path: Path) -> None:
-        jar: Any = http.cookies.SimpleCookie()
-        with open(path, encoding="utf-8") as f:
-            for i, line in enumerate(f):
-                # Names of headers are case insensitive
-                if line[:11].lower() == "set-cookie:":
-                    jar.load(line[11:])
-                else:
-                    log.explain(f"Line {i} doesn't start with 'Set-Cookie:', ignoring it")
-        self._cookie_jar.update_cookies(jar)
-
-    def _save_cookies_to_file(self, path: Path) -> None:
-        jar: Any = http.cookies.SimpleCookie()
-        for morsel in self._cookie_jar:
-            jar[morsel.key] = morsel
-        with open(path, "w", encoding="utf-8") as f:
-            f.write(jar.output(sep="\n"))
-            f.write("\n")  # A trailing newline is just common courtesy
-
     def _load_cookies(self) -> None:
         log.explain_topic("Loading cookies")
         cookie_jar_path: Optional[Path] = None
 
         if self._shared_cookie_jar_paths is None:
-            log.explain("Not sharing any cookies")
+            log.explain("Not sharing cookies")
             cookie_jar_path = self._cookie_jar_path
         else:
             log.explain("Sharing cookies")
@@ -154,46 +131,38 @@ class HttpCrawler(Crawler):
         log.explain(f"Loading cookies from {fmt_real_path(cookie_jar_path)}")
         try:
-            self._load_cookies_from_file(cookie_jar_path)
+            self._cookie_jar.load(filename=str(cookie_jar_path))
         except Exception as e:
-            log.explain("Failed to load cookies")
-            log.explain(str(e))
+            log.explain(f"Failed to load cookies: {e}")
+            log.explain("Proceeding without cookies")
 
     def _save_cookies(self) -> None:
         log.explain_topic("Saving cookies")
 
         try:
             log.explain(f"Saving cookies to {fmt_real_path(self._cookie_jar_path)}")
-            self._save_cookies_to_file(self._cookie_jar_path)
+            self._cookie_jar.save(filename=str(self._cookie_jar_path))
         except Exception as e:
-            log.warn(f"Failed to save cookies to {fmt_real_path(self._cookie_jar_path)}")
-            log.warn(str(e))
+            log.warn(f"Failed to save cookies: {e}")
 
     async def run(self) -> None:
         self._request_count = 0
-        self._cookie_jar = aiohttp.CookieJar()
         self._load_cookies()
 
-        async with aiohttp.ClientSession(
-            headers={"User-Agent": f"{NAME}/{VERSION}"},
-            cookie_jar=self._cookie_jar,
-            connector=aiohttp.TCPConnector(ssl=ssl.create_default_context(cafile=certifi.where())),
-            timeout=ClientTimeout(
-                # 30 minutes. No download in the history of downloads was longer than 30 minutes.
-                # This is enough to transfer a 600 MB file over a 3 Mib/s connection.
-                # Allowing an arbitrary value could be annoying for overnight batch jobs
-                total=15 * 60,
-                connect=self._http_timeout,
-                sock_connect=self._http_timeout,
-                sock_read=self._http_timeout,
-            )
-        ) as session:
-            self.session = session
+        self.session = requests.Session()
+        self.session.headers["User-Agent"] = f"{NAME}/{VERSION}"
+
+        # From the request docs: "All requests code should work out of the box
+        # with externally provided instances of CookieJar, e.g. LWPCookieJar and
+        # FileCookieJar."
+        # https://requests.readthedocs.io/en/latest/api/#requests.cookies.RequestsCookieJar
+        self.session.cookies = self._cookie_jar  # type: ignore
+
+        with self.session:
            try:
                 await super().run()
             finally:
                 del self.session
-        log.explain_topic(f"Total amount of HTTP requests: {self._request_count}")
 
         # They are saved in authenticate, but a final save won't hurt
         self._save_cookies()
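
A minimal sketch of the cookie handling the new `run()` relies on: `requests.Session` accepts an externally provided `http.cookiejar` jar, and `LWPCookieJar` can persist itself to disk. This is not PFERD code; the file name and URL below are made up for the example.

```python
from http.cookiejar import LWPCookieJar

import requests

jar = LWPCookieJar()
try:
    # Missing or malformed files raise OSError subclasses; proceed without
    # cookies in that case, just like _load_cookies() above.
    jar.load(filename="cookies.lwp")
except OSError:
    pass

session = requests.Session()
session.headers["User-Agent"] = "PFERD/example"
session.cookies = jar  # type: ignore  # accepted per the requests docs

session.get("https://example.com")
jar.save(filename="cookies.lwp")  # persist whatever the server set
```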

View File

@@ -126,13 +126,6 @@ def _iorepeat(attempts: int, name: str, failure_is_error: bool = False) -> Calla
     return decorator
 
 
-def _wrap_io_in_warning(name: str) -> Callable[[AWrapped], AWrapped]:
-    """
-    Wraps any I/O exception in a CrawlWarning.
-    """
-    return _iorepeat(1, name)
-
-
 # Crawler control flow:
 #
 #  crawl_desktop -+
@@ -226,114 +219,45 @@ instance's greatest bottleneck.
             return
         cl = maybe_cl  # Not mypy's fault, but explained here: https://github.com/python/mypy/issues/2608
 
-        elements: List[IliasPageElement] = []
-        # A list as variable redefinitions are not propagated to outer scopes
-        description: List[BeautifulSoup] = []
-
-        @_iorepeat(3, "crawling url")
-        async def gather_elements() -> None:
-            elements.clear()
-            async with cl:
-                next_stage_url: Optional[str] = url
-                current_parent = None
-
-                # Duplicated code, but the root page is special - we want to avoid fetching it twice!
-                while next_stage_url:
-                    soup = await self._get_page(next_stage_url)
-
-                    if current_parent is None and expected_id is not None:
-                        perma_link_element: Tag = soup.find(id="current_perma_link")
-                        if not perma_link_element or "crs_" not in perma_link_element.get("value"):
-                            raise CrawlError("Invalid course id? Didn't find anything looking like a course")
-
-                    log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
-                    log.explain(f"URL: {next_stage_url}")
-                    page = IliasPage(soup, next_stage_url, current_parent)
-                    if next_element := page.get_next_stage_element():
-                        current_parent = next_element
-                        next_stage_url = next_element.url
-                    else:
-                        next_stage_url = None
-
-                    elements.extend(page.get_child_elements())
-                    if description_string := page.get_description():
-                        description.append(description_string)
-
-        # Fill up our task list with the found elements
-        await gather_elements()
-
-        if description:
-            await self._download_description(PurePath("."), description[0])
-
-        elements.sort(key=lambda e: e.id())
-
-        tasks: List[Awaitable[None]] = []
-        for element in elements:
-            if handle := await self._handle_ilias_element(PurePath("."), element):
-                tasks.append(asyncio.create_task(handle))
-
-        # And execute them
-        await self.gather(tasks)
-
-    async def _handle_ilias_page(
-        self,
-        url: str,
-        parent: IliasPageElement,
-        path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
-        maybe_cl = await self.crawl(path)
-        if not maybe_cl:
-            return None
-
-        return self._crawl_ilias_page(url, parent, maybe_cl)
+        def ensure_is_valid_course_id(parent: Optional[IliasPageElement], soup: BeautifulSoup) -> None:
+            if parent is None and expected_id is not None:
+                perma_link_element: Tag = soup.find(id="current_perma_link")
+                if not perma_link_element or "crs_" not in perma_link_element.get("value"):
+                    raise CrawlError("Invalid course id? Didn't find anything looking like a course")
+
+        await self._crawl_ilias_page(url, None, cl, ensure_is_valid_course_id)
 
     @anoncritical
     async def _crawl_ilias_page(
         self,
         url: str,
-        parent: IliasPageElement,
+        parent: Optional[IliasPageElement],
         cl: CrawlToken,
+        next_stage_hook: Callable[[Optional[IliasPageElement], BeautifulSoup], None] = lambda a, b: None
     ) -> None:
-        elements: List[IliasPageElement] = []
-        # A list as variable redefinitions are not propagated to outer scopes
-        description: List[BeautifulSoup] = []
-
-        @_iorepeat(3, "crawling folder")
-        async def gather_elements() -> None:
-            elements.clear()
-            async with cl:
-                next_stage_url: Optional[str] = url
-                current_parent = parent
-
-                while next_stage_url:
-                    soup = await self._get_page(next_stage_url)
-                    log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
-                    log.explain(f"URL: {next_stage_url}")
-                    page = IliasPage(soup, next_stage_url, current_parent)
-                    if next_element := page.get_next_stage_element():
-                        current_parent = next_element
-                        next_stage_url = next_element.url
-                    else:
-                        next_stage_url = None
-
-                    elements.extend(page.get_child_elements())
-                    if description_string := page.get_description():
-                        description.append(description_string)
-
-        # Fill up our task list with the found elements
-        await gather_elements()
-
-        if description:
-            await self._download_description(cl.path, description[0])
-
-        elements.sort(key=lambda e: e.id())
-
-        tasks: List[Awaitable[None]] = []
-        for element in elements:
-            if handle := await self._handle_ilias_element(cl.path, element):
-                tasks.append(asyncio.create_task(handle))
-
-        # And execute them
-        await self.gather(tasks)
+        async with cl:
+            next_stage_url: Optional[str] = url
+            current_parent = parent
+
+            while next_stage_url:
+                soup = await self._get_page(next_stage_url)
+                log.explain_topic(f"Parsing HTML page for {fmt_path(cl.path)}")
+                log.explain(f"URL: {next_stage_url}")
+
+                next_stage_hook(current_parent, soup)
+
+                page = IliasPage(soup, next_stage_url, current_parent)
+                if next_element := page.get_next_stage_element():
+                    current_parent = next_element
+                    next_stage_url = next_element.url
+                else:
+                    next_stage_url = None
+
+                for element in sorted(page.get_child_elements(), key=lambda e: e.id()):
+                    await self._handle_ilias_element(cl.path, element)
+
+                if description_string := page.get_description():
+                    await self._download_description(cl.path, description_string)
 
     # These decorators only apply *to this method* and *NOT* to the returned
     # awaitables!
@@ -345,7 +269,7 @@ instance's greatest bottleneck.
         self,
         parent_path: PurePath,
         element: IliasPageElement,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         if element.url in self._visited_urls:
             raise CrawlWarning(
                 f"Found second path to element {element.name!r} at {element.url!r}. "
@@ -367,7 +291,7 @@ instance's greatest bottleneck.
             return None
 
         if element.type == IliasElementType.FILE:
-            return await self._handle_file(element, element_path)
+            await self._handle_file(element, element_path)
         elif element.type == IliasElementType.FORUM:
             if not self._forums:
                 log.status(
@@ -377,7 +301,7 @@ instance's greatest bottleneck.
                     "[bright_black](enable with option 'forums')"
                 )
                 return None
-            return await self._handle_forum(element, element_path)
+            await self._handle_forum(element, element_path)
         elif element.type == IliasElementType.TEST:
             log.status(
                 "[bold bright_black]",
@@ -395,15 +319,18 @@ instance's greatest bottleneck.
             )
             return None
         elif element.type == IliasElementType.LINK:
-            return await self._handle_link(element, element_path)
+            await self._handle_link(element, element_path)
         elif element.type == IliasElementType.BOOKING:
-            return await self._handle_booking(element, element_path)
+            await self._handle_booking(element, element_path)
         elif element.type == IliasElementType.VIDEO:
-            return await self._handle_file(element, element_path)
+            await self._handle_file(element, element_path)
         elif element.type == IliasElementType.VIDEO_PLAYER:
-            return await self._handle_video(element, element_path)
+            await self._handle_video(element, element_path)
         elif element.type in _DIRECTORY_PAGES:
-            return await self._handle_ilias_page(element.url, element, element_path)
+            maybe_cl = await self.crawl(element_path)
+            if not maybe_cl:
+                return None
+            await self._crawl_ilias_page(element.url, element, maybe_cl)
         else:
             # This will retry it a few times, failing everytime. It doesn't make any network
             # requests, so that's fine.
@@ -413,7 +340,7 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         log.explain_topic(f"Decision: Crawl Link {fmt_path(element_path)}")
         log.explain(f"Links type is {self._links}")
 
@@ -430,7 +357,7 @@ instance's greatest bottleneck.
         if not maybe_dl:
             return None
 
-        return self._download_link(element, link_template_maybe, maybe_dl)
+        await self._download_link(element, link_template_maybe, maybe_dl)
 
     @anoncritical
     @_iorepeat(3, "resolving link")
@@ -522,7 +449,7 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         # Copy old mapping as it is likely still relevant
         if self.prev_report:
             self.report.add_custom_value(
@@ -548,7 +475,7 @@ instance's greatest bottleneck.
             return None
 
-        return self._download_video(element_path, element, maybe_dl)
+        await self._download_video(element_path, element, maybe_dl)
 
     def _previous_contained_videos(self, video_path: PurePath) -> List[PurePath]:
         if not self.prev_report:
@@ -630,11 +557,11 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         maybe_dl = await self.download(element_path, mtime=element.mtime)
         if not maybe_dl:
             return None
 
-        return self._download_file(element, maybe_dl)
+        await self._download_file(element, maybe_dl)
 
     @anoncritical
     @_iorepeat(3, "downloading file")
@@ -677,11 +604,11 @@ instance's greatest bottleneck.
         self,
         element: IliasPageElement,
         element_path: PurePath,
-    ) -> Optional[Coroutine[Any, Any, None]]:
+    ) -> None:
         maybe_cl = await self.crawl(element_path)
         if not maybe_cl:
             return None
 
-        return self._crawl_forum(element, maybe_cl)
+        await self._crawl_forum(element, maybe_cl)
 
     @_iorepeat(3, "crawling forum")
     @anoncritical
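
The course-id check is now injected into `_crawl_ilias_page` as the `next_stage_hook` callback instead of being inlined in the crawl loop. A standalone sketch of that pattern, with a default no-op hook mirroring the `lambda a, b: None` above; all names here are illustrative, not PFERD's:

```python
from typing import Callable, List, Optional


def walk_pages(
    pages: List[str],
    # Default is a no-op, so callers that need no per-page check pass nothing.
    next_stage_hook: Callable[[Optional[str], str], None] = lambda parent, page: None,
) -> None:
    parent: Optional[str] = None
    for page in pages:
        next_stage_hook(parent, page)  # only the first page sees parent=None
        parent = page


def ensure_looks_like_course(parent: Optional[str], page: str) -> None:
    if parent is None and "crs_" not in page:
        raise ValueError("Invalid course id? Didn't find anything looking like a course")


walk_pages(["<a id='current_perma_link' value='crs_123'>", "<div>child</div>"],
           next_stage_hook=ensure_looks_like_course)
```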

View File

@@ -2,7 +2,7 @@ import os
 import re
 from dataclasses import dataclass
 from pathlib import PurePath
-from typing import Awaitable, List, Optional, Pattern, Set, Tuple, Union
+from typing import List, Optional, Pattern, Set, Tuple, Union
 from urllib.parse import urljoin
 
 from bs4 import BeautifulSoup, Tag
@@ -64,42 +64,37 @@ class KitIpdCrawler(HttpCrawler):
         self._file_regex = section.link_regex()
 
     async def _run(self) -> None:
-        maybe_cl = await self.crawl(PurePath("."))
-        if not maybe_cl:
+        cl = await self.crawl(PurePath("."))
+        if not cl:
             return
 
-        tasks: List[Awaitable[None]] = []
-        async with maybe_cl:
+        async with cl:
             for item in await self._fetch_items():
                 if isinstance(item, KitIpdFolder):
-                    tasks.append(self._crawl_folder(item))
+                    await self._crawl_folder(item)
                 else:
                     # Orphan files are placed in the root folder
-                    tasks.append(self._download_file(PurePath("."), item))
-
-        await self.gather(tasks)
+                    await self._download_file(PurePath("."), item)
 
     async def _crawl_folder(self, folder: KitIpdFolder) -> None:
         path = PurePath(folder.name)
         if not await self.crawl(path):
             return
 
-        tasks = [self._download_file(path, file) for file in folder.files]
-
-        await self.gather(tasks)
+        for file in folder.files:
+            await self._download_file(path, file)
 
     async def _download_file(self, parent: PurePath, file: KitIpdFile) -> None:
         element_path = parent / file.name
-        maybe_dl = await self.download(element_path)
-        if not maybe_dl:
+        dl = await self.download(element_path)
+        if not dl:
             return
 
-        async with maybe_dl as (bar, sink):
+        async with dl as (bar, sink):
             await self._stream_from_url(file.url, sink, bar)
 
     async def _fetch_items(self) -> Set[Union[KitIpdFile, KitIpdFolder]]:
-        page, url = await self.get_page()
+        page, url = await self._get_page()
         elements: List[Tag] = self._find_file_links(page)
         items: Set[Union[KitIpdFile, KitIpdFolder]] = set()
@@ -159,12 +154,12 @@ class KitIpdCrawler(HttpCrawler):
             sink.done()
 
-    async def get_page(self) -> Tuple[BeautifulSoup, str]:
-        async with self.session.get(self._url) as request:
-            # The web page for Algorithmen für Routenplanung contains some
-            # weird comments that beautifulsoup doesn't parse correctly. This
-            # hack enables those pages to be crawled, and should hopefully not
-            # cause issues on other pages.
-            content = (await request.read()).decode("utf-8")
-            content = re.sub(r"<!--.*?-->", "", content)
-            return soupify(content.encode("utf-8")), str(request.url)
+    async def _get_page(self) -> Tuple[BeautifulSoup, str]:
+        response = self.session.get(self._url)
+
+        # The web page for Algorithmen für Routenplanung contains some
+        # weird comments that beautifulsoup doesn't parse correctly. This
+        # hack enables those pages to be crawled, and should hopefully not
+        # cause issues on other pages.
+        content = re.sub(r"<!--.*?-->", "", response.text)
+        return soupify(content.encode("utf-8")), str(request.url)
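
For context, a self-contained sketch of the comment-stripping hack above, using BeautifulSoup directly in place of PFERD's `soupify()` helper; the HTML snippet is made up:

```python
import re

from bs4 import BeautifulSoup

html = "<html><body><!-- a comment the parser chokes on --><a href='slides.pdf'>slides</a></body></html>"
cleaned = re.sub(r"<!--.*?-->", "", html)  # strip HTML comments before parsing
soup = BeautifulSoup(cleaned, "html.parser")
print([a["href"] for a in soup.find_all("a")])  # ['slides.pdf']
```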

View File

@@ -71,8 +71,6 @@ class LocalCrawler(Crawler):
         if not cl:
             return
 
-        tasks = []
-
         async with cl:
             await asyncio.sleep(random.uniform(
                 0.5 * self._crawl_delay,
@@ -81,9 +79,7 @@ class LocalCrawler(Crawler):
             for child in path.iterdir():
                 pure_child = cl.path / child.name
-                tasks.append(self._crawl_path(child, pure_child))
-
-        await self.gather(tasks)
+                await self._crawl_path(child, pure_child)
 
     async def _crawl_file(self, path: Path, pure: PurePath) -> None:
         stat = path.stat()
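
The same change repeats across the crawlers on this branch: instead of collecting coroutines and running them through `self.gather(...)`, children are awaited one at a time. A minimal sketch of the two control flows in plain asyncio (not PFERD code, names made up):

```python
import asyncio
from typing import List


async def handle(name: str) -> None:
    await asyncio.sleep(0)
    print("handled", name)


async def old_style(children: List[str]) -> None:
    # Previously: schedule everything, then wait for all tasks at once.
    tasks = [asyncio.create_task(handle(child)) for child in children]
    await asyncio.gather(*tasks)


async def new_style(children: List[str]) -> None:
    # Now: strictly sequential, one child after the other.
    for child in children:
        await handle(child)


asyncio.run(new_style(["a", "b", "c"]))
```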

View File

@@ -1,97 +0,0 @@
-import asyncio
-import time
-from contextlib import asynccontextmanager
-from dataclasses import dataclass
-from typing import AsyncIterator, Optional
-
-
-@dataclass
-class Slot:
-    active: bool = False
-    last_left: Optional[float] = None
-
-
-class Limiter:
-    def __init__(
-            self,
-            task_limit: int,
-            download_limit: int,
-            task_delay: float
-    ):
-        if task_limit <= 0:
-            raise ValueError("task limit must be at least 1")
-        if download_limit <= 0:
-            raise ValueError("download limit must be at least 1")
-        if download_limit > task_limit:
-            raise ValueError("download limit can't be greater than task limit")
-        if task_delay < 0:
-            raise ValueError("Task delay must not be negative")
-
-        self._slots = [Slot() for _ in range(task_limit)]
-        self._downloads = download_limit
-        self._delay = task_delay
-
-        self._condition = asyncio.Condition()
-
-    def _acquire_slot(self) -> Optional[Slot]:
-        for slot in self._slots:
-            if not slot.active:
-                slot.active = True
-                return slot
-
-        return None
-
-    async def _wait_for_slot_delay(self, slot: Slot) -> None:
-        if slot.last_left is not None:
-            delay = slot.last_left + self._delay - time.time()
-            if delay > 0:
-                await asyncio.sleep(delay)
-
-    def _release_slot(self, slot: Slot) -> None:
-        slot.last_left = time.time()
-        slot.active = False
-
-    @asynccontextmanager
-    async def limit_crawl(self) -> AsyncIterator[None]:
-        slot: Slot
-        async with self._condition:
-            while True:
-                if found_slot := self._acquire_slot():
-                    slot = found_slot
-                    break
-                await self._condition.wait()
-
-        await self._wait_for_slot_delay(slot)
-
-        try:
-            yield
-        finally:
-            async with self._condition:
-                self._release_slot(slot)
-                self._condition.notify_all()
-
-    @asynccontextmanager
-    async def limit_download(self) -> AsyncIterator[None]:
-        slot: Slot
-        async with self._condition:
-            while True:
-                if self._downloads <= 0:
-                    await self._condition.wait()
-                    continue
-
-                if found_slot := self._acquire_slot():
-                    slot = found_slot
-                    self._downloads -= 1
-                    break
-
-                await self._condition.wait()
-
-        await self._wait_for_slot_delay(slot)
-
-        try:
-            yield
-        finally:
-            async with self._condition:
-                self._release_slot(slot)
-                self._downloads += 1
-                self._condition.notify_all()
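
The deleted `Limiter` capped concurrent crawls and downloads and spaced tasks out with a delay; with everything now awaited sequentially it has no callers left. Purely for orientation, a rough and much simpler sketch of the same idea using stock asyncio primitives; this is not a drop-in replacement and the class name is made up:

```python
import asyncio


class SimpleLimiter:
    """Bound concurrency with a semaphore and add a delay between tasks."""

    def __init__(self, task_limit: int, task_delay: float):
        self._semaphore = asyncio.Semaphore(task_limit)
        self._delay = task_delay

    async def __aenter__(self) -> None:
        await self._semaphore.acquire()

    async def __aexit__(self, *exc: object) -> None:
        # Hold the slot during the delay, a crude stand-in for Slot.last_left.
        await asyncio.sleep(self._delay)
        self._semaphore.release()


async def job(limiter: SimpleLimiter, i: int) -> None:
    async with limiter:
        print("running", i)


async def main() -> None:
    limiter = SimpleLimiter(task_limit=2, task_delay=0.1)
    await asyncio.gather(*(job(limiter, i) for i in range(5)))


asyncio.run(main())
```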

View File

@@ -92,17 +92,32 @@ def url_set_query_params(url: str, params: Dict[str, str]) -> str:
 def str_path(path: PurePath) -> str:
+    """
+    Turn a path into a string, in a platform-independent way.
+
+    This function always uses "/" as path separator, even on Windows.
+    """
     if not path.parts:
         return "."
     return "/".join(path.parts)
 
 
 def fmt_path(path: PurePath) -> str:
+    """
+    Turn a path into a delimited string.
+
+    This is useful if file or directory names contain weird characters like
+    newlines, leading/trailing whitespace or unprintable characters. This way,
+    they are escaped and visible to the user.
+    """
     return repr(str_path(path))
 
 
 def fmt_real_path(path: Path) -> str:
-    return repr(str(path.absolute()))
+    """
+    Like fmt_path, but resolves the path before converting it to a string.
+    """
+    return fmt_path(path.absolute())
 
 
 class ReusableAsyncContextManager(ABC, Generic[T]):
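
A quick illustration of what the newly documented helpers produce; the two functions are copied from the diff above so the snippet runs on its own:

```python
from pathlib import PurePath


def str_path(path: PurePath) -> str:
    if not path.parts:
        return "."
    return "/".join(path.parts)


def fmt_path(path: PurePath) -> str:
    return repr(str_path(path))


print(str_path(PurePath()))                      # .
print(fmt_path(PurePath("a b") / "new\nline"))   # 'a b/new\nline'
```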

View File

@@ -14,4 +14,4 @@
 pip install --editable .
 # Installing tools and type hints
 pip install --upgrade mypy flake8 autopep8 isort pyinstaller
-pip install --upgrade types-chardet types-certifi
+mypy PFERD --install-types --non-interactive

View File

@@ -11,6 +11,7 @@ install_requires =
     rich>=11.0.0
     keyring>=23.5.0
     certifi>=2021.10.8
+    requests>=2.28.1
 
 [options.entry_points]
 console_scripts =