Merge pull request #2 from I-Al-Istannen/master

Add support for TGI
This commit is contained in:
Garmelon 2019-10-15 20:01:10 +00:00 committed by GitHub
commit e152bfc4a3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 78 additions and 0 deletions

View File

@ -3,6 +3,7 @@ import logging
from .ffm import *
from .ilias import *
from .norbert import *
from .tgi import *
from .ti import *
from .utils import *
@ -11,6 +12,7 @@ __all__ = ["STYLE", "FORMAT", "DATE_FORMAT", "FORMATTER", "enable_logging"]
# Re-export every crawler module's public names at package level, so users
# can write e.g. `from PFERD import TGI` instead of `from PFERD.tgi import TGI`.
__all__ += ffm.__all__
__all__ += ilias.__all__
__all__ += norbert.__all__
__all__ += tgi.__all__
__all__ += ti.__all__
__all__ += utils.__all__

76
PFERD/tgi.py Normal file
View File

@ -0,0 +1,76 @@
# TGI Lecture slides
import logging
import pathlib
import re
import zipfile
import bs4
import requests
from .organizer import Organizer
from .utils import rename, stream_to_path, PrettyLogger
__all__ = ["TGI"]
logger = logging.getLogger(__name__)
pretty = PrettyLogger(logger)
class TGI:
    """Crawler/synchronizer for the TGI lecture slides on i11www.iti.kit.edu.

    Scrapes the course index page for links to slide PDFs and mirrors them
    into a local directory via the project's Organizer.
    """

    # Index page listing the slides; ``{year}`` is filled from ``self.year``.
    CRAWL_URL = "https://i11www.iti.kit.edu/teaching/{year}/tgi/index"
    BASE_URL = "https://i11www.iti.kit.edu"
    # Matches slide links. group(1) is the "tgi-<n>-<n>-" prefix,
    # group(2) the remaining file name (always ends in .pdf).
    LINK_RE = re.compile(r"^/_media/teaching/.*?/(tgi-\d+-\d+-)([^/]*\.pdf)$")

    def __init__(self, base_path, year="winter2019"):
        """
        base_path: directory under which the sync directory is created.
        year: term identifier substituted into CRAWL_URL, e.g. "winter2019".
        """
        self.base_path = base_path
        self._session = requests.Session()
        self.year = year

    def synchronize(self, to_dir, transform=lambda x: x):
        """Crawl all slide PDFs and synchronize them into base_path/to_dir.

        transform maps a crawled pathlib.PurePath to the path the file should
        be stored under, or returns None to skip that file.
        """
        pretty.starting_synchronizer(to_dir, "TGI")

        sync_path = pathlib.Path(self.base_path, to_dir)
        orga = Organizer(self.base_path, sync_path)

        orga.clean_temp_dir()
        files = self._crawl()
        self._download(orga, files, transform)
        orga.clean_sync_dir()
        orga.clean_temp_dir()

    def _crawl(self):
        """Scrape the course index page; return a list of (path, url) pairs."""
        # str.format instead of a manual "{year}" replace.
        url = self.CRAWL_URL.format(year=self.year)
        r = self._session.get(url)
        soup = bs4.BeautifulSoup(r.text, "html.parser")

        files = []
        for link in soup.find_all("a", href=self.LINK_RE):
            href = link["href"]
            full_url = self.BASE_URL + href
            # Use the already-compiled pattern; group(2) drops the
            # "tgi-<n>-<n>-" prefix from the stored file name.
            filename = self.LINK_RE.search(href).group(2)
            path = pathlib.PurePath(filename)
            # Was a leftover "(unknown)" placeholder — log the real name.
            logger.debug("Found file %s at %s", filename, full_url)
            files.append((path, full_url))
        return files

    def _download(self, orga, files, transform):
        """Download every crawled file whose transformed path is not None."""
        for path, url in sorted(files):
            logger.debug("Downloading %s", path)
            new_path = transform(path)
            if new_path is not None:
                temp_file = orga.temp_file()
                self._download_file(url, temp_file)
                orga.add_file(temp_file, new_path)

    def _download_file(self, url, to_path):
        # Stream the response so large PDFs are never held fully in memory.
        with self._session.get(url, stream=True) as r:
            stream_to_path(r, to_path)