Add support for TGI website
This commit is contained in:
parent 52852d11a6
commit 458cc1c6d6
PFERD/__init__.py

@@ -3,6 +3,7 @@ import logging
 from .ffm import *
 from .ilias import *
 from .norbert import *
+from .tgi import *
 from .ti import *
 from .utils import *

@@ -11,6 +12,7 @@ __all__ = ["STYLE", "FORMAT", "DATE_FORMAT", "FORMATTER", "enable_logging"]
 __all__ += ffm.__all__
 __all__ += ilias.__all__
 __all__ += norbert.__all__
+__all__ += tgi.__all__
 __all__ += ti.__all__
 __all__ += utils.__all__
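With the re-export above in place, the new synchronizer becomes importable directly from the package (a minimal sketch, assuming PFERD is on the import path):

from PFERD import TGI  # re-exported via tgi.__all__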
PFERD/tgi.py (new file, 75 lines)
@@ -0,0 +1,75 @@
+# TGI Lecture slides
+
+import logging
+import pathlib
+import re
+import zipfile
+
+import bs4
+import requests
+
+from .organizer import Organizer
+from .utils import rename, stream_to_path, PrettyLogger
+
+__all__ = ["TGI"]
+logger = logging.getLogger(__name__)
+pretty = PrettyLogger(logger)
+
+class TGI:
+    CRAWL_URL = "https://i11www.iti.kit.edu/teaching/winter2019/tgi/index"
+    BASE_URL = "https://i11www.iti.kit.edu"
+    LINK_RE = re.compile(r"^/_media/teaching/.*?/(tgi-\d+-\d+-)([^/]*\.pdf)$")
+
+    def __init__(self, base_path):
+        self.base_path = base_path
+
+        self._session = requests.Session()
+
+    def synchronize(self, to_dir, transform=lambda x: x):
+        pretty.starting_synchronizer(to_dir, "TGI")
+
+        sync_path = pathlib.Path(self.base_path, to_dir)
+        orga = Organizer(self.base_path, sync_path)
+
+        orga.clean_temp_dir()
+
+        files = self._crawl()
+        self._download(orga, files, transform)
+
+        orga.clean_sync_dir()
+        orga.clean_temp_dir()
+
+    def _crawl(self):
+        url = self.CRAWL_URL
+        r = self._session.get(url)
+
+        text = r.text
+        soup = bs4.BeautifulSoup(text, "html.parser")
+
+        files = []
+
+        for found in soup.find_all("a", href=self.LINK_RE):
+            url = found["href"]
+            full_url = self.BASE_URL + url
+
+            filename = re.search(self.LINK_RE, url).group(2)
+            path = pathlib.PurePath(filename)
+
+            logger.debug(f"Found file {filename} at {full_url}")
+            files.append((path, full_url))
+
+        return files
+
+    def _download(self, orga, files, transform):
+        for path, url in sorted(files):
+            logger.debug(f"Downloading {path}")
+
+            new_path = transform(path)
+            if new_path is not None:
+                temp_file = orga.temp_file()
+                self._download_file(url, temp_file)
+                orga.add_file(temp_file, new_path)
+
+    def _download_file(self, url, to_path):
+        with self._session.get(url, stream=True) as r:
+            stream_to_path(r, to_path)
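For reference, a quick sketch of how LINK_RE carves up a matching href (the sample path below is hypothetical, modeled on the /_media/teaching/ prefix the pattern expects):

import re

LINK_RE = re.compile(r"^/_media/teaching/.*?/(tgi-\d+-\d+-)([^/]*\.pdf)$")

href = "/_media/teaching/winter2019/tgi/tgi-2019-10-vorlesung.pdf"  # hypothetical
match = LINK_RE.search(href)
print(match.group(1))  # tgi-2019-10-
print(match.group(2))  # vorlesung.pdf

Note that _crawl() keeps only group(2), so the tgi-YYYY-NN- date prefix is dropped from the local filename.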
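And a minimal usage sketch of the new class (the base path and target directory names are hypothetical; Organizer and the utils helpers are assumed to behave as elsewhere in the repository):

from PFERD import TGI

tgi = TGI("sync")  # hypothetical base path shared by the synchronizers

# Crawl the lecture page and download every matched PDF into sync/tgi/.
# A transform may rename a file by returning a new path, or skip it by
# returning None; the default keeps paths unchanged.
tgi.synchronize("tgi")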