diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9960b056440ff489c09f353e576101663f475763..34aca70fe5e6276d60749bf11cf73d974a1c90eb 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,27 +1,22 @@
 linter:
-  image: debian:buster
+  image: registry.git.fsmpi.rwth-aachen.de/videoaginfra/testenvs/stretch
   stage: test
   script:
-  - apt update
-  - apt install -y python3
-  - python3 -V
   - uname -a
-  - apt install -y sqlite3 locales-all git python3-flask python3-ldap3 python3-requests python3-lxml python3-icalendar python3-requests python3-coverage pylint3
-  - pylint3 --indent-string='\t' --indent-after-paren=1 --max-line-length=160 --disable=missing-docstring,unused-wildcard-import --output-format=text *.py | tee pylint.txt
+  - python3 -V
+  - pylint --version
+  - pylint --rcfile=.pylintrc *.py | tee pylint.txt
   artifacts:
       paths:
       - pylint.txt
 
 unittest: &unittest
-  image: debian:stretch
+  image: registry.git.fsmpi.rwth-aachen.de/videoaginfra/testenvs/stretch
   stage: test
   script:
-  - apt update
-  - apt install -y python3
-  - python3 -V
   - uname -a
-  - apt install -y sqlite3 locales-all git python3-flask python3-ldap3 python3-requests python3-lxml python3-icalendar python3-mysql.connector python3-requests python3-coverage
-  - python3 -m coverage run runTests.py
+  - python3 -V
+  - python3 -m coverage run run_tests.py
   - python3 -m coverage report --include "./*"
   - python3 -m coverage report -m  --include "./*" > report.txt
   - python3 -m coverage html --include "./*"
@@ -36,21 +31,21 @@ unittest: &unittest
 #unittest_buster:
 #  <<: *unittest
 #  image: debian:buster
-
-livetest:
-  image: debian:stretch
-  stage: test
-  script:
-  - apt update
-  - apt install -y python3
-  - python3 -V
-  - uname -a
-  - apt install -y python3-requests
+#
+#livetest:
+#  image: debian:stretch
+#  stage: test
+#  script:
+#  - apt update
+#  - apt install -y python3
+#  - python3 -V
+#  - uname -a
+#  - apt install -y python3-requests
 #    - ./tests/urlcheck_sinf.py
-
-deploy_staging:
-  image: archlinux/base
-  stage: deploy
-  script:
-  - pacman --noconfirm -Sy ansible git
-  
+#
+#deploy_staging:
+#  image: archlinux/base
+#  stage: deploy
+#  script:
+#  - pacman --noconfirm -Sy ansible git
+#
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000000000000000000000000000000000000..836f4d0e9e8555ca2c4b3f7b06d9443f76ea17f7
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,602 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
+# number of processors available to use.
+jobs=1
+
+# Control the amount of potential inferred values when inferring a single
+# object. This can help the performance when dealing with large functions or
+# complex, nested conditions.
+limit-inference-results=100
+
+# List of plugins (as comma separated values of python module names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages.
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable=missing-module-docstring,
+        missing-class-docstring,
+        missing-function-docstring,
+        pointless-string-statement, # docstrings were misdetected
+        print-statement,
+        parameter-unpacking,
+        unpacking-in-except,
+        old-raise-syntax,
+        backtick,
+        long-suffix,
+        old-ne-operator,
+        old-octal-literal,
+        import-star-module-level,
+        non-ascii-bytes-literal,
+        raw-checker-failed,
+        bad-inline-option,
+        locally-disabled,
+        file-ignored,
+        suppressed-message,
+        useless-suppression,
+        deprecated-pragma,
+        use-symbolic-message-instead,
+        unused-wildcard-import,
+        apply-builtin,
+        basestring-builtin,
+        buffer-builtin,
+        cmp-builtin,
+        coerce-builtin,
+        execfile-builtin,
+        file-builtin,
+        long-builtin,
+        raw_input-builtin,
+        reduce-builtin,
+        standarderror-builtin,
+        unicode-builtin,
+        xrange-builtin,
+        coerce-method,
+        delslice-method,
+        getslice-method,
+        setslice-method,
+        no-absolute-import,
+        old-division,
+        dict-iter-method,
+        dict-view-method,
+        next-method-called,
+        metaclass-assignment,
+        indexing-exception,
+        raising-string,
+        reload-builtin,
+        oct-method,
+        hex-method,
+        nonzero-method,
+        cmp-method,
+        input-builtin,
+        round-builtin,
+        intern-builtin,
+        unichr-builtin,
+        map-builtin-not-iterating,
+        zip-builtin-not-iterating,
+        range-builtin-not-iterating,
+        filter-builtin-not-iterating,
+        using-cmp-argument,
+        eq-without-hash,
+        div-method,
+        idiv-method,
+        rdiv-method,
+        exception-message-attribute,
+        invalid-str-codec,
+        sys-max-int,
+        bad-python3-import,
+        deprecated-string-function,
+        deprecated-str-translate-call,
+        deprecated-itertools-function,
+        deprecated-types-field,
+        next-method-defined,
+        dict-items-not-iterating,
+        dict-keys-not-iterating,
+        dict-values-not-iterating,
+        deprecated-operator-function,
+        deprecated-urllib-function,
+        xreadlines-attribute,
+        deprecated-sys-function,
+        exception-escape,
+        comprehension-escape,
+        wildcard-import,
+        bad-continuation, # remove me later
+        redefined-builtin,
+        redefined-outer-name, # remove me later
+        function-redefined,  # remove me later
+        cyclic-import, # remove me later, should be fixed but needs major refactoring
+        no-else-return
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a score less than or equal to 10. You
+# have access to the variables 'error', 'warning', 'refactor', and 'convention'
+# which contain the number of messages in each category, as well as 'statement'
+# which is the total number of statements analyzed. This score is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details.
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages.
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=sys.exit
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# Tells whether to warn about missing members when the owner of the attribute
+# is inferred to be None.
+ignore-none=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken into consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+# List of decorators that change the signature of a decorated function.
+signature-mutators=
+
+
+[BASIC]
+
+# Naming style matching correct argument names.
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style.
+#argument-rgx=
+
+# Naming style matching correct attribute names.
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style.
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma.
+bad-names=foo,
+          bar,
+          baz,
+          toto,
+          tutu,
+          tata
+
+# Naming style matching correct class attribute names.
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style.
+#class-attribute-rgx=
+
+# Naming style matching correct class names.
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-
+# style.
+#class-rgx=
+
+# Naming style matching correct constant names.
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style.
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names.
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style.
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=i,
+           j,
+           e,
+           k,
+           f,
+           r,
+           ex,
+           Run,
+           _,
+           id,
+           db,
+           ip,
+           app,
+           config,
+           cur
+
+# Include a hint for the correct naming format with invalid-name.
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names.
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style.
+#inlinevar-rgx=
+
+# Naming style matching correct method names.
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style.
+#method-rgx=
+
+# Naming style matching correct module names.
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style.
+#module-rgx=
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+# These decorators are taken into consideration only for invalid-name.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names.
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style.
+#variable-rgx=
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,
+      XXX,
+      TODO
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+          _cb
+
+# A regular expression matching the name of dummy variables (i.e. expected to
+# not be used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
+
+
+[STRING]
+
+# This flag controls whether the implicit-str-concat-in-sequence should
+# generate a warning on implicit string concatenation in sequences defined over
+# several lines.
+check-str-concat-over-line-jumps=no
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes.
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains the private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the private dictionary (see the
+# --spelling-private-dict-file option) instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=1
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string=\t
+
+# Maximum number of characters on a single line.
+max-line-length=160
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1  : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+               dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[LOGGING]
+
+# Format style used to check logging format string. `old` means using %
+# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings.
+logging-format-style=old
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format.
+logging-modules=logging
+
+
+[IMPORTS]
+
+# List of modules that can be imported at any level, not just the top level
+# one.
+allow-any-import-level=
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma.
+deprecated-modules=optparse,tkinter.tix
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled).
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled).
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled).
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+# Couples of modules and preferred modules, separated by a comma.
+preferred-modules=
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+                      __new__,
+                      setUp,
+                      __post_init__
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+                  _fields,
+                  _replace,
+                  _source,
+                  _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=cls
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method.
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "BaseException, Exception".
+overgeneral-exceptions=BaseException,
+                       Exception
diff --git a/README.md b/README.md
index 9583ea5313a80a06a2b0aca976370a31997ad8c6..690f2ae056060a9c35db3fafb2c96d98e794562c 100644
--- a/README.md
+++ b/README.md
@@ -15,9 +15,9 @@ Hinweis: diese Variante startet eine lokale Testversion der Website, es sind nic
 Alternativ, insbesondere zum Testen der Zugriffsbeschränkungen: Siehe `nginx.example.conf`.
 
 ### Unittests
-Tests können mittels `./runTests.py` ausgeführt werden.
+Tests können mittels `./run_tests.py` ausgeführt werden.
 
-Coverage Tests können mittels `rm .coverage; python -m coverage run runTests.py; python -m coverage html` ausgeführt werden. Dies erstellt einen Ordner `htmlcov` in dem HTML Output liegt.
+Coverage Tests können mittels `rm .coverage; python -m coverage run run_tests.py; python -m coverage html` ausgeführt werden. Dies erstellt einen Ordner `htmlcov` in dem HTML Output liegt.
 
 ### Zum Mitmachen:
 1. Repo für den eigenen User forken, dafür den "Fork-Button" auf der Website verwenden
diff --git a/chapters.py b/chapters.py
index cf908764c91a71e24b1871dc97d4936223cc12c5..67b1ecb7624dc343813eeab3daf1e2659ec89455 100644
--- a/chapters.py
+++ b/chapters.py
@@ -2,7 +2,7 @@ import json
 from server import *
 
 @job_handler('probe', 'probe-raw')
-def import_xmp_chapters(jobid, jobtype, data, state, status):
+def import_xmp_chapters(jobid, jobtype, data, state, status): #pylint: disable=unused-argument
 	if 'lecture_id' not in data or not data.get('import-chapters', False):
 		return
 	times = set()
@@ -57,7 +57,7 @@ def chapters(lectureid):
 	last = None
 	for chapter in chapters:
 		chapter['start'] = chapter['time']
-		chapter['end'] = last['start'] if last else 9999
+		chapter['end'] = last['start'] if last else 9999 #pylint: disable=unsubscriptable-object
 		last = chapter
 	if 'json' in request.values:
 		return Response(json.dumps([{'time': c['time'], 'text': c['text']} for c in chapters]), mimetype='application/json')
diff --git a/cutprogress.py b/cutprogress.py
index 0c0ba5d43ea7ea53b80bd1de5880975c2807cc42..e08b9b5ac19d466e5af28b5399f42c3b574cca44 100644
--- a/cutprogress.py
+++ b/cutprogress.py
@@ -24,7 +24,7 @@ def cutprogress(user=None):
 			ORDER BY users.realname ASC
 			''', course['id'])
 		if not people:
-			people = [{ 'realname': 'Niemand', 'id': -1 }]
+			people = [{'realname': 'Niemand', 'id': -1}]
 		course['responsible'] = people
 	if user is not None:
 		courses = [
@@ -33,7 +33,7 @@ def cutprogress(user=None):
 		]
 	# Fetch lectures for all courses
 	lectures = []
-	for c in courses:
+	for course in courses:
 		lectures += query('''
 			SELECT
 				lectures.id,
@@ -50,7 +50,7 @@ def cutprogress(user=None):
 				AND NOT lectures.norecording
 			GROUP BY lectures.id
 			ORDER BY lectures.time ASC, lectures.id ASC
-			''', c['id'], datetime.now())
+			''', course['id'], datetime.now())
 	# Generate list of days, figure out when weeks change
 	dates = sorted({row['time'].date() for row in lectures}, reverse=True)
 	is_new_weeks = [
diff --git a/db.py b/db.py
index bb3996fb199be9d34e2f4822104926c2fa88460a..42730d5924b1e91a0f4d4347d736abcc5f83cd30 100644
--- a/db.py
+++ b/db.py
@@ -1,8 +1,9 @@
+import sqlite3
+from flask import g
+
 from server import *
 
 if config['DB_ENGINE'] == 'sqlite':
-	import sqlite3
-
 	# From sqlite3 module, but with error catching
 	def convert_timestamp(val):
 		try:
@@ -19,13 +20,13 @@ if config['DB_ENGINE'] == 'sqlite':
 	sqlite3.register_converter('timestamp', convert_timestamp)
 
 	if config['DB_ENGINE'] == 'sqlite':
-		created = not os.path.exists(config['SQLITE_DB'])
+		DBCREATED = not os.path.exists(config['SQLITE_DB'])
 		db = sqlite3.connect(config['SQLITE_DB'])
 		cur = db.cursor()
 		if config['SQLITE_INIT_SCHEMA']:
 			print('Init db schema')
 			cur.executescript(open(config['DB_SCHEMA']).read())
-		if config['SQLITE_INIT_DATA'] and created:
+		if config['SQLITE_INIT_DATA'] and DBCREATED:
 			print('Init db data')
 			cur.executescript(open(config['DB_DATA']).read())
 		db.commit()
@@ -43,12 +44,11 @@ if config['DB_ENGINE'] == 'sqlite':
 		params = [(p.replace(microsecond=0) if isinstance(p, datetime) else p) for p in params]
 		return operation, params
 
-	def show(operation, host=None):
+	def show(operation, host=None): #pylint: disable=unused-argument
 		return {}
 
 elif config['DB_ENGINE'] == 'mysql':
 	import mysql.connector
-
 	def get_dbcursor():
 		if 'db' not in g or not g.db.is_connected():
 			g.db = mysql.connector.connect(
@@ -77,8 +77,8 @@ elif config['DB_ENGINE'] == 'mysql':
 		rows = []
 		try:
 			rows = cur.fetchall()
-		except mysql.connector.errors.InterfaceError as ie:
-			if ie.msg == 'No result set to fetch from.':
+		except mysql.connector.errors.InterfaceError as e:
+			if e.msg == 'No result set to fetch from.':
 				# no problem, we were just at the end of the result set
 				pass
 			else:
@@ -93,22 +93,23 @@ elif config['DB_ENGINE'] == 'mysql':
 def query(operation, *params, delim="sep", nlfix=True):
 	operation, params = fix_query(operation, params)
 	tries = 0
-	while (tries < 10):
+	retry = True
+	while tries < 10 and retry:
+		retry = False
 		try:
 			cur = get_dbcursor()
 			cur.execute(operation, params)
 		except mysql.connector.errors.InternalError as e:
 			if e.msg == 'Deadlock found when trying to get lock; try restarting transaction':
 				tries += 1
-				continue
+				retry = True
 			else:
 				raise
-		break
 	rows = []
 	try:
 		rows = cur.fetchall()
-	except mysql.connector.errors.InterfaceError as ie:
-		if ie.msg == 'No result set to fetch from.':
+	except mysql.connector.errors.InterfaceError as e:
+		if e.msg == 'No result set to fetch from.':
 			# no problem, we were just at the end of the result set
 			pass
 		else:
@@ -122,7 +123,7 @@ def query(operation, *params, delim="sep", nlfix=True):
 			if name == delim:
 				ptr = res[-1][col] = {}
 				continue
-			if type(col) == str and nlfix:
+			if isinstance(col, str) and nlfix:
 				col = col.replace('\\n', '\n').replace('\\r', '\r')
 			ptr[name] = col
 	return res
@@ -134,13 +135,13 @@ def modify(operation, *params):
 	return cur.lastrowid
 
 @app.teardown_request
-def commit_db(*args):
+def commit_db(*args): #pylint: disable=unused-argument
 	if hasattr(request, 'db'):
 		request.db.close()
 		g.db.commit()
 
 @app.teardown_appcontext
-def close_db(*args):
+def close_db(*args): #pylint: disable=unused-argument
 	if 'db' in g:
 		g.db.close()
 		del g.db
diff --git a/db_schema.sql b/db_schema.sql
index ae064216eb3debe2adc69bfd81f3a518ea130289..bfcdd2cb7fac48530c4566617ad0644472a9cee9 100644
--- a/db_schema.sql
+++ b/db_schema.sql
@@ -155,6 +155,7 @@ CREATE TABLE IF NOT EXISTS `log` (
 CREATE TABLE IF NOT EXISTS `hlslog` (
 	`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
 	`time` datetime NOT NULL,
+	`segment` INTEGER,
 	`source` INTEGER,
 	`lecture` INTEGER,
 	`handle` varchar(32),
diff --git a/edit.py b/edit.py
index c04b7917db3a6d6790c9c95dee51a961a904b26e..e0f8b43f70541c21a1a5a49fe2a57f3204a62a14 100644
--- a/edit.py
+++ b/edit.py
@@ -1,3 +1,5 @@
+import math
+
 from server import *
 
 # field types:
@@ -7,7 +9,8 @@ from server import *
 # 	datetime
 # 	duration
 # 	videotime
-editable_tables = {
+
+editable_tables = { #pylint: disable=invalid-name
 	'courses': {
 		'table': 'courses_data',
 		'idcolumn': 'id',
@@ -29,7 +32,7 @@ editable_tables = {
 			'coursechapters':	{'type': 'boolean', 'description': 'Sollen auf der Kursseite die Kapitelmarker der Videos angezeigt werden?'},
 			'autopublish':	{'type': 'boolean', 'description': 'Sollen encodete Videos automatisch verschoben werden?'},
 			'autovisible':	{'type': 'boolean', 'description': 'Sollen neue Videos automatisch sichtbar sein?'}},
-		'creationtime_fields': ['created_by', 'time_created', 'time_updated'] },
+		'creationtime_fields': ['created_by', 'time_created', 'time_updated']},
 	'lectures': {
 		'table': 'lectures_data',
 		'idcolumn': 'id',
@@ -48,14 +51,14 @@ editable_tables = {
 			'norecording': {'type': 'boolean', 'description:': 'Führt dazu, dass der Termin ausgegraut wird.'},
 			'stream_settings':	{'type': 'text'}
 			},
-		'creationtime_fields': ['course_id', 'time_created', 'time_updated'] },
+		'creationtime_fields': ['course_id', 'time_created', 'time_updated']},
 	'videos': {
 		'table': 'videos_data',
 		'idcolumn': 'id',
 		'editable_fields': {
 			'visible':	{'type': 'boolean', 'description': 'Ein nicht sichtbares Video kann nicht abgerufen werden.'},
 			'deleted':	{'type': 'boolean'}},
-		'creationtime_fields': ['created_by', 'time_created', 'time_updated'] },
+		'creationtime_fields': ['created_by', 'time_created', 'time_updated']},
 	'chapters': {
 		'table': 'chapters',
 		'idcolumn': 'id',
@@ -64,7 +67,7 @@ editable_tables = {
 			'text':		{'type': 'shortstring'},
 			'visible':	{'type': 'boolean'},
 			'deleted':	{'type': 'boolean'}},
-		'creationtime_fields': ['created_by', 'time_created', 'time_updated'] },
+		'creationtime_fields': ['created_by', 'time_created', 'time_updated']},
 	'announcements': {
 		'table': 'announcements',
 		'idcolumn': 'id',
@@ -75,7 +78,7 @@ editable_tables = {
 			'deleted':	{'type': 'boolean'},
 			'time_publish':	{'type': 'datetime'},
 			'time_expire':	{'type': 'datetime'}},
-		'creationtime_fields': ['created_by', 'time_created', 'time_updated'] },
+		'creationtime_fields': ['created_by', 'time_created', 'time_updated']},
 	'featured': {
 		'table': 'featured',
 		'idcolumn': 'id',
@@ -87,8 +90,8 @@ editable_tables = {
 			'deleted':	{'type': 'boolean'},
 			'param':	{'type': 'shortstring'},
 			'param2':	{'type': 'shortstring'},
-			'order':	{'type': 'integer' }},
-		'creationtime_fields': ['created_by', 'time_created', 'time_updated', 'type'] },
+			'order':	{'type': 'integer'}},
+		'creationtime_fields': ['created_by', 'time_created', 'time_updated', 'type']},
 	'perm': {
 		'table': 'perm',
 		'idcolumn': 'id',
@@ -97,13 +100,13 @@ editable_tables = {
 			'param1':	{'type': 'shortstring'},
 			'param2':	{'type': 'shortstring'},
 			'deleted':	{'type': 'boolean'}},
-		'creationtime_fields': ['course_id', 'lecture_id', 'video_id', 'created_by', 'time_created', 'time_updated'] },
+		'creationtime_fields': ['course_id', 'lecture_id', 'video_id', 'created_by', 'time_created', 'time_updated']},
 	'sorterrorlog': {
 		'table': 'sorterrorlog_data',
 		'idcolumn': 'id',
 		'editable_fields': {
 			'deleted':	{'type': 'boolean'}},
-		'creationtime_fields': ['time_created', 'time_updated'] },
+		'creationtime_fields': ['time_created', 'time_updated']},
 	'users': {
 		'table': 'users',
 		'idcolumn': 'id',
@@ -113,7 +116,7 @@ editable_tables = {
 			'notify_new_video': {'type': 'boolean'},
 			'notify_edit': {'type': 'boolean'}
 		},
-		'creationtime_fields': [] },
+		'creationtime_fields': []},
 	'live_sources': {
 		'table': 'live_sources',
 		'idcolumn': 'id',
@@ -122,7 +125,7 @@ editable_tables = {
 			'description': {'type': 'text'},
 			'deleted':  {'type': 'boolean'}
 		},
-		'creationtime_fields': ['created_by', 'time_created', 'time_updated'] }
+		'creationtime_fields': ['created_by', 'time_created', 'time_updated']}
 	}
 
 #parses the path to a dict, containing the table, id, field and field type
@@ -135,25 +138,31 @@ def parseeditpath(path):
 	return {'table': table, 'id': id, 'column': column, 'type': type, 'tableinfo': editable_tables[table]}
 
 @app.template_filter(name='getfielddescription')
-def getfielddescription(path):
-	p = parseeditpath(path)
-	desc = p['tableinfo']['editable_fields'][p['column']].get('description', '')
+def getfielddescription(inputpath):
+	path = parseeditpath(inputpath)
+	desc = path['tableinfo']['editable_fields'][path['column']].get('description', '')
 	if desc != '':
 		desc = '<br>'+desc
 	return desc
 
 @app.template_filter(name='getfieldchangelog')
-def getfieldchangelog(path):
-	p = parseeditpath(path)
-	changelog = query('SELECT * FROM changelog LEFT JOIN users ON (changelog.who = users.id) WHERE `table` = ? AND `id_value` = ? and `field` = ? ORDER BY `when` DESC LIMIT 5', p['table'], p['id'], p['column'])
+def getfieldchangelog(inputpath):
+	path = parseeditpath(inputpath)
+	changelog = query('SELECT * FROM changelog \
+			LEFT JOIN users ON (changelog.who = users.id) WHERE `table` = ? AND `id_value` = ? and `field` = ? \
+			ORDER BY `when` DESC LIMIT 5', path['table'], path['id'], path['column'])
 	for entry in changelog:
+		entry['id_value'] = str(entry['id_value'])
+		entry['value_new'] = str(entry['value_new'])
 		entry['path'] = '.'.join([entry['table'], entry['id_value'], entry['field']])
 	return changelog
 
 @app.route('/internal/edit', methods=['GET', 'POST'])
 @mod_required
 @csrf_protect
-def edit(prefix='', ignore=[]):
+def edit(prefix='', ignore=None):
+	if not ignore:
+		ignore = []
 	# All editable tables are expected to have a 'time_updated' field
 	ignore.append('ref')
 	ignore.append('prefix')
@@ -168,10 +177,24 @@ def edit(prefix='', ignore=[]):
 			continue
 		key = prefix+key
 		path = parseeditpath(key)
-		modify('INSERT INTO changelog (`table`,id_value, id_key, field, value_new, value_old, `when`, who, executed) \
-			VALUES (?,?,?,?,?,(SELECT `%s` FROM %s WHERE %s = ?),?,?,1)'%(path['column'], path['tableinfo']['table'], path['tableinfo']['idcolumn']),
-				path['table'], path['id'], path['tableinfo']['idcolumn'], path['column'], val, path['id'], datetime.now(), session['user']['dbid'])
-		modify('UPDATE %s SET `%s` = ?, time_updated = ? WHERE `%s` = ?'%(path['tableinfo']['table'], path['column'], path['tableinfo']['idcolumn']), val, datetime.now(),path['id'])
+		modify('INSERT INTO changelog \
+				(`table`,id_value, id_key, field, value_new, value_old, `when`, who, executed) \
+				VALUES (?,?,?,?,?, \
+				(SELECT `%s` FROM %s WHERE %s = ?),?,?,1)'%(
+					path['column'],
+					path['tableinfo']['table'],
+					path['tableinfo']['idcolumn']
+					),
+				path['table'],
+				path['id'],
+				path['tableinfo']['idcolumn'],
+				path['column'],
+				val,
+				path['id'],
+				datetime.now(),
+				session['user']['dbid'])
+		modify('UPDATE %s SET `%s` = ?, time_updated = ? WHERE `%s` = ?'%(path['tableinfo']['table'], path['column'], path['tableinfo']['idcolumn']),
+				val, datetime.now(), path['id'])
 		for func in edit_handlers.get(path['table'], {}).get(None, []):
 			func(path['table'], path['column'], val, path['id'], session['user']['dbid'])
 		for func in edit_handlers.get(path['table'], {}).get(path['column'], []):
@@ -196,7 +219,7 @@ def create(table):
 	if (request.method == 'POST') and (request.get_json()):
 		args = request.get_json().items()
 	for column, val in args:
-		if (column == 'ref') or (column == '_csrf_token'):
+		if column in ['ref', '_csrf_token']:
 			continue
 		assert column in list(editable_tables[table]['editable_fields'].keys())+editable_tables[table]['creationtime_fields']
 		assert column not in defaults
@@ -214,14 +237,8 @@ def create(table):
 @register_navbar('Changelog', icon='book', group='weitere')
 @mod_required
 def changelog():
-	if 'page' in request.args:
-		page = max(0, int(request.args['page']))
-	else:
-		page = 0
-	if 'pagesize' in request.args:
-		pagesize = min(500, int(request.args['pagesize']))
-	else:
-		pagesize = 50
+	page = max(0, int(request.args.get('page', 0)))
+	pagesize = min(500, int(request.args.get('pagesize', 50)))
 	changelog = query('SELECT * FROM changelog LEFT JOIN users ON (changelog.who = users.id) ORDER BY `when` DESC LIMIT ? OFFSET ?', pagesize, page*pagesize)
 	pagecount = math.ceil(query('SELECT count(id) as count FROM changelog')[0]['count']/pagesize)
 	for entry in changelog:
@@ -234,12 +251,12 @@ def changelog():
 @csrf_protect
 def set_responsible(course_id, user_id, value):
 	if value:
-		modify('REPLACE INTO responsible (course_id, user_id) values (?, ?)', course_id, user_id);
+		modify('REPLACE INTO responsible (course_id, user_id) values (?, ?)', course_id, user_id)
 	else:
-		modify('DELETE FROM responsible WHERE course_id = ? AND user_id = ?', course_id, user_id);
+		modify('DELETE FROM responsible WHERE course_id = ? AND user_id = ?', course_id, user_id)
 	return "OK", 200
-	
-edit_handlers = {}
+
+edit_handlers = {} #pylint: disable=invalid-name
 def edit_handler(*tables, field=None):
 	def wrapper(func):
 		for table in tables:
diff --git a/encoding.py b/encoding.py
index d48d827c9891d06f2f9104ede3781d245e71817b..a4dc849a2812ea43a748de9a84d29a6fcff7222e 100644
--- a/encoding.py
+++ b/encoding.py
@@ -1,8 +1,10 @@
-from server import *
-from sorter import insert_video
 import os.path
 import json
 
+from server import *
+from sorter import insert_video
+from edit import edit_handler
+
 def set_metadata(dest, course, lecture):
 	chapters = query('SELECT text, time FROM chapters WHERE lecture_id = ? AND visible ORDER BY time', lecture['id'])
 	metadata = {'title': lecture['title'], 'album': course['title'],
@@ -12,12 +14,13 @@ def set_metadata(dest, course, lecture):
 	dest['metadata'] = metadata
 	dest['chapters'] = chapters
 
-def schedule_intro(lectureid):
-	lecture = query('SELECT * FROM lectures where id = ?', lectureid)
-	course = query('SELECT * FROM course where id = ?', lecture['course_id'])
-	data = {'path': path, 'lecture_id': lectureid}
-	set_metadata(data, course, lecture)
-	schedule_job('intro', data)
+# Incomplete and not enabled currently
+#def schedule_intro(lectureid):
+#	lecture = query('SELECT * FROM lectures where id = ?', lectureid)
+#	course = query('SELECT * FROM course where id = ?', lecture['course_id'])
+#	data = {'path': path, 'lecture_id': lectureid}
+#	set_metadata(data, course, lecture)
+#	schedule_job('intro', data)
 
 def schedule_remux(lectureid, videoid=None):
 	lecture = query('SELECT * FROM lectures WHERE id = ?', lectureid)[0]
@@ -55,8 +58,8 @@ def add_remux_job():
 def schedule_transcode(source, fmt_id=None, video=None):
 	if video:
 		fmt_id = video['video_format']
-		assert(video['lecture_id'] == source['lecture_id'])
-	assert(fmt_id != None)
+		assert video['lecture_id'] == source['lecture_id']
+	assert fmt_id is not None
 	fmt = query('SELECT * FROM formats WHERE id = ?', fmt_id)[0]
 	lecture = query('SELECT * FROM lectures WHERE id = ?', source['lecture_id'])[0]
 	course = query('SELECT * FROM courses WHERE id = ?', lecture['course_id'])[0]
@@ -67,7 +70,7 @@ def schedule_transcode(source, fmt_id=None, video=None):
 		stream = {'name': 'audio', 'type': 'audio'}
 		data['input']['streams'].append(stream)
 	else:
-		assert(False)
+		assert False
 	set_metadata(data['output'], course, lecture)
 	basename = os.path.basename(source['path']).rsplit('.', 1)[0]
 	data['output']['path'] = 'pub/'+course['handle']+'/'+basename+fmt['suffix']
@@ -84,12 +87,19 @@ def schedule_transcode(source, fmt_id=None, video=None):
 	return schedule_job('transcode', data, queue="background")
 
 @job_handler('transcode')
-def insert_transcoded_video(jobid, jobtype, data, state, status):
+def insert_transcoded_video(jobid, jobtype, data, state, status): #pylint: disable=unused-argument
 	if 'lecture_id' not in data or 'source_id' not in data or 'format_id' not in data:
 		return
 	if 'video_id' in data:
 		return
-	video_id = insert_video(data['lecture_id'], data['output']['path'], data['format_id'], status['hash'], status['filesize'], status['duration'], data['source_id'])
+	video_id = insert_video(
+			data['lecture_id'],
+			data['output']['path'],
+			data['format_id'],
+			status['hash'],
+			status['filesize'],
+			status['duration'],
+			data['source_id'])
 	schedule_remux(data['lecture_id'], video_id)
 
 @app.route('/internal/jobs/add/reencode', methods=['GET', 'POST'])
@@ -106,7 +116,7 @@ def add_reencode_job():
 	return redirect(request.values.get('ref', url_for('jobs_overview')))
 
 @job_handler('probe-raw', 'intro')
-def update_lecture_videos(jobid, jobtype, data, state, status):
+def update_lecture_videos(jobid, jobtype, data, state, status): #pylint: disable=unused-argument
 	if 'lecture_id' not in data:
 		return
 	if jobtype == 'probe-raw':
@@ -117,26 +127,25 @@ def update_lecture_videos(jobid, jobtype, data, state, status):
 	if not sources:
 		return
 	latest = sources[-1]
-	lecture = query('SELECT * FROM lectures where id = ?', data['lecture_id'])
-	if False and jobtype == 'probe-raw':
-		schedule_intro(data['lecture_id'])
-	else:
-		videos = query('SELECT * FROM videos WHERE videos.lecture_id = ?', data['lecture_id'])
-		current_fmts = [v['video_format'] for v in videos]
-		formats = query('''SELECT formats.* FROM formats
-				JOIN profiles ON formats.id = profiles.format
-				JOIN courses ON profiles.name = courses.profile
-				JOIN lectures ON courses.id = lectures.course_id
-				WHERE lectures.id = ?''', data['lecture_id'])
-		for fmt in formats:
-			if fmt['id'] not in current_fmts:
-				schedule_transcode(latest, fmt_id=fmt['id'])
-		for video in videos:
-			if video['source'] != latest['id']:
-				schedule_transcode(latest, video=video)
+	# Incomplete and not enabled currently
+	#if False and jobtype == 'probe-raw':
+	#	schedule_intro(data['lecture_id'])
+	videos = query('SELECT * FROM videos WHERE videos.lecture_id = ?', data['lecture_id'])
+	current_fmts = [v['video_format'] for v in videos]
+	formats = query('''SELECT formats.* FROM formats
+			JOIN profiles ON formats.id = profiles.format
+			JOIN courses ON profiles.name = courses.profile
+			JOIN lectures ON courses.id = lectures.course_id
+			WHERE lectures.id = ?''', data['lecture_id'])
+	for fmt in formats:
+		if fmt['id'] not in current_fmts:
+			schedule_transcode(latest, fmt_id=fmt['id'])
+	for video in videos:
+		if video['source'] != latest['id']:
+			schedule_transcode(latest, video=video)
 
 @edit_handler('chapters')
-def chapter_changed(table, column, value, id, user):
+def chapter_changed(table, column, value, id, user): #pylint: disable=unused-argument
 	chapters = query('SELECT * FROM chapters WHERE id = ?', id)
 	if not chapters:
 		return
@@ -145,7 +154,7 @@ def chapter_changed(table, column, value, id, user):
 		schedule_remux(chapter['lecture_id'])
 
 @edit_handler('courses')
-def course_changed(table, column, value, id, user):
+def course_changed(table, column, value, id, user): #pylint: disable=unused-argument
 	if column not in ['title', 'organizer']:
 		return
 	lectures = query('SELECT * FROM lectures WHERE course_id = ?', id)
@@ -153,7 +162,6 @@ def course_changed(table, column, value, id, user):
 		schedule_remux(lecture['id'])
 
 @edit_handler('lectures')
-def lecture_changed(table, column, value, id, user):
+def lecture_changed(table, column, value, id, user): #pylint: disable=unused-argument
 	if column in ['title', 'comment', 'time', 'speaker']:
 		schedule_remux(id)
-
diff --git a/feeds.py b/feeds.py
index 701a813a358d1d051a7c228f10eff36d905ff8f6..ba8c2dfd79d38060c6b08274af868e20ebbb229f 100644
--- a/feeds.py
+++ b/feeds.py
@@ -1,24 +1,25 @@
+import hashlib
+from datetime import MINYEAR
+
 from server import *
 
-def gen_atomid(s):
-	return 'urn:md5:'+hashlib.md5(s.encode('utf-8')).hexdigest().upper()
+def gen_atomid(value):
+	return 'urn:md5:'+hashlib.md5(value.encode('utf-8')).hexdigest().upper()
 
-def fixdate(d):
-	if not isinstance(d, datetime):
+def fixdate(value):
+	if not isinstance(value, datetime):
 		return datetime(MINYEAR, 1, 1)
-	return d
+	return value
 
 @app.route('/feed')
 @app.route('/<handle>/feed')
 @handle_errors(None, 'Diese Veranstaltung existiert nicht!', 404, IndexError)
 def feed(handle=None):
-	id = None
 	course = {'id': None, 'title': 'Neueste Videos', 'time_created': None, 'time_updated': None}
 	course['atomid'] = gen_atomid('FROM videos SELECT *')
 	if handle:
 		course = query('SELECT * FROM courses WHERE handle = ? AND visible', handle)[0]
 		course['atomid'] = gen_atomid('Video AG, courses['+str(course['id'])+']: '+course['handle'])
-		id = course['id']
 	entries = query('''
 			SELECT lectures.*, "video" AS sep, videos.*, formats.description AS format_description, formats.prio, "course" AS sep, courses.*
 				FROM lectures
@@ -62,10 +63,16 @@ def rss_feed(handle):
 			WHERE courses.id = ? AND videos.video_format = ? AND courses.visible AND lectures.visible AND videos.visible
 			ORDER BY lectures.time DESC
 			LIMIT 100''', course['id'], fmt['id'])
-	chapters = query('SELECT chapters.* FROM chapters JOIN lectures ON lectures.id = chapters.lecture_id WHERE lectures.course_id = ? AND NOT chapters.deleted AND chapters.visible ORDER BY time ASC', course['id'])
+	chapters = query('SELECT chapters.* FROM chapters \
+			JOIN lectures ON lectures.id = chapters.lecture_id \
+			WHERE lectures.course_id = ? AND NOT chapters.deleted AND chapters.visible \
+			ORDER BY time ASC', course['id'])
 	for item in items:
 		item['updated'] = max(item['video']['time_created'], item['video']['time_updated'], item['time_created'], item['time_updated'], key=fixdate)
-	return Response(render_template('feed.rss', course=course, format=fmt, formats=formats, items=items, chapters=chapters), 200, {'Content-Type': 'application/rss+xml; charset=UTF-8'})
+	return Response(
+			render_template('feed.rss', course=course, format=fmt, formats=formats, items=items, chapters=chapters),
+			200,
+			{'Content-Type': 'application/rss+xml; charset=UTF-8'})
 
 @app.route('/courses/feed')
 def courses_feed():
diff --git a/icalexport.py b/icalexport.py
index fa90fdb94d5753e7c64f42f007d57e762f832324..cf50ff70a5941aa8912aba6a0274b836c18c6b6a 100644
--- a/icalexport.py
+++ b/icalexport.py
@@ -1,34 +1,36 @@
-from server import *
+from datetime import timedelta, datetime
+from ipaddress import ip_address, ip_network
 import icalendar
 from werkzeug.datastructures import Headers
-from datetime import timedelta, datetime
+
+from server import *
 
 def export_lectures(lectures, responsible, name):
 	cal = icalendar.Calendar()
 	cal.add('prodid', '-//Video AG//rwth.video//')
 	cal.add('version', '1.0')
-	for l in lectures:
+	for lecture in lectures:
 		resp = []
-		for r in responsible:
-			if r['course_id'] == l['course_id']:
+		for r in responsible: #pylint: disable=invalid-name
+			if r['course_id'] == lecture['course_id']:
 				resp.append(r['realname'])
 		event = icalendar.Event()
-		event.add('summary', l['course']['short']+': '+l['title'])
+		event.add('summary', lecture['course']['short']+': '+lecture['title'])
 		event.add('description', '\n\n'.join([s for s in [
-					l['comment'],
-					l['internal'],
+					lecture['comment'],
+					lecture['internal'],
 					'Zuständig: '+', '.join(resp) if resp else ''
 			] if s]))
-		event.add('uid', '%i@rwth.video'%l['id'])
+		event.add('uid', '%i@rwth.video'%lecture['id'])
 		event.add('dtstamp', datetime.utcnow())
-		event.add('categories', l['course']['short'])
-		event.add('dtstart', l['time'])
-		event.add('location', l['place'])
-		event.add('dtend', l['time'] + timedelta(minutes=l['duration']))
+		event.add('categories', lecture['course']['short'])
+		event.add('dtstart', lecture['time'])
+		event.add('location', lecture['place'])
+		event.add('dtend', lecture['time'] + timedelta(minutes=lecture['duration']))
 		cal.add_component(event)
-	h = Headers()
-	h.add_header("Content-Disposition", "inline", filename=name)
-	return Response(cal.to_ical(), mimetype="text/calendar", headers=h)
+	headers = Headers()
+	headers.add_header("Content-Disposition", "inline", filename=name)
+	return Response(cal.to_ical(), mimetype="text/calendar", headers=headers)
 
 def calperm(func):
 	@wraps(func)
@@ -45,8 +47,7 @@ def calperm(func):
 				permission = True
 		if permission:
 			return func(*args, **kwargs)
-		else:
-			return Response("Login required", 401, {'WWW-Authenticate': 'Basic realm="FS-Login required"'})
+		return Response("Login required", 401, {'WWW-Authenticate': 'Basic realm="FS-Login required"'})
 	return decorator
 
 def get_responsible():
diff --git a/importer.py b/importer.py
index 502226e7d49e3a4f4613f14df79b39904875f6c1..2d65fed5a0ef50efe490894052404cd0498cb43c 100644
--- a/importer.py
+++ b/importer.py
@@ -1,14 +1,14 @@
-from server import *
-
 import urllib.request
 import urllib.parse
 
+from server import *
+
 @app.route('/internal/import/<int:id>', methods=['GET', 'POST'])
 @mod_required
 def list_import_sources(id):
 	courses = query('SELECT * FROM courses WHERE id = ?', id)[0]
 
-	campus={}
+	campus = {}
 	for i in request.values:
 		group, importid, field = i.split('.', 2)
 		if group == 'campus':
@@ -18,19 +18,20 @@ def list_import_sources(id):
 	for i in campus:
 		if i.startswith('new'):
 			if campus[i]['url'] != '':
-				modify('INSERT INTO import_campus (url, type, course_id, last_checked, changed) VALUES (?, ?, ?, ?, 1)',campus[i]['url'],campus[i]['type'],id,datetime.now())
+				modify('INSERT INTO import_campus (url, type, course_id, last_checked, changed) VALUES (?, ?, ?, ?, 1)',
+					campus[i]['url'], campus[i]['type'], id, datetime.now())
 		else:
 			if campus[i]['url'] != '':
-				query('UPDATE import_campus SET url = ?, `type` = ? WHERE (course_id = ?) AND (id = ?)', campus[i]['url'],campus[i]['type'],id,int(i))	
+				query('UPDATE import_campus SET url = ?, `type` = ? WHERE (course_id = ?) AND (id = ?)', campus[i]['url'], campus[i]['type'], id, int(i))
 			else:
-				query('DELETE FROM import_campus WHERE (id = ?) AND (course_id = ?)',int(i),id)
-	import_campus = query('SELECT * FROM import_campus WHERE course_id = ?',id)
+				query('DELETE FROM import_campus WHERE (id = ?) AND (course_id = ?)', int(i), id)
+	import_campus = query('SELECT * FROM import_campus WHERE course_id = ?', id)
 
 	return render_template('import_campus.html', course=courses, import_campus=import_campus, events=[])
 
 def fetch_co_course_events(i):
-	from lxml import html
-	from lxml import etree
+	# pylint: disable=too-many-locals,too-many-branches,too-many-statements,invalid-name,bare-except
+	from lxml import html #  pylint: disable=import-outside-toplevel
 	events = []
 	try:
 		remote_html = urllib.request.urlopen(i['url']).read()
@@ -38,7 +39,6 @@ def fetch_co_course_events(i):
 		flash("Ungültige URL: '"+i['url']+"'")
 	tablexpath = "//td[text()='Termine und Ort']/following::table[1]"
 	basetable = html.fromstring(remote_html).xpath(tablexpath)[0]
-	parsebase = html.tostring(basetable);
 
 	#parse recurring events
 	toparse = [i['url']]
@@ -66,7 +66,7 @@ def fetch_co_course_events(i):
 		elif baserow.xpath("td[6]/a"):
 			rowdata['place'] = baserow.xpath("td[6]/a")[0].text_content()
 		else:
-			rowdata['place'] = baserow.xpath("td[6]/text()")[0].split(' ',1)[0]
+			rowdata['place'] = baserow.xpath("td[6]/text()")[0].split(' ', 1)[0]
 
 		rowdata['start'] = baserow.xpath("td[3]/text()")[0]
 		rowdata['end'] = baserow.xpath("td[5]/text()")[0]
@@ -83,7 +83,7 @@ def fetch_co_course_events(i):
 			elif row.xpath("a"):
 				rowdata['place'] = row.xpath("a")[0].text_content()
 			else:
-				rowdata['place'] = row.xpath("text()[2]")[0].split(' ',1)[0]
+				rowdata['place'] = row.xpath("text()[2]")[0].split(' ', 1)[0]
 
 			rowdata['dates'] = [row.xpath("text()[1]")[0][4:14]]
 			rowdata['start'] = row.xpath("text()[1]")[0][17:22]
@@ -94,12 +94,13 @@ def fetch_co_course_events(i):
 	for j in events_raw:
 		for k in j['dates']:
 			e = {}
-			fmt= "%d.%m.%Y %H:%M"
-			e['time'] = datetime.strptime("%s %s"%(k,j['start']) ,fmt)
-			e['duration'] = int((datetime.strptime("%s %s"%(k,j['end']) ,fmt) - e['time']).seconds/60)
+			fmt = "%d.%m.%Y %H:%M"
+			e['time'] = datetime.strptime("%s %s"%(k, j['start']), fmt)
+			e['duration'] = int((datetime.strptime("%s %s"%(k, j['end']), fmt) - e['time']).seconds/60)
 			j['place'] = str(j['place'])
 			if j['place'] != '':
-				dbplace = query("SELECT name FROM places WHERE (campus_room = ?) OR (campus_name = ?) OR ((NOT campus_name) AND name = ?)",j['place'],j['place'],j['place'])
+				dbplace = query("SELECT name FROM places WHERE (campus_room = ?) OR (campus_name = ?) OR ((NOT campus_name) AND name = ?)",
+					j['place'], j['place'], j['place'])
 				if dbplace:
 					e['place'] = dbplace[0]['name']
 				else:
@@ -117,12 +118,13 @@ def fetch_ro_event_ical(ids):
 	for id in ids:
 		data.append(('pTerminNr', id))
 	data = urllib.parse.urlencode(data).encode('utf-8')
-	r = urllib.request.Request('https://online.rwth-aachen.de/RWTHonline/pl/ui/%24ctx/wbKalender.wbExport',
+	req = urllib.request.Request('https://online.rwth-aachen.de/RWTHonline/pl/ui/%24ctx/wbKalender.wbExport',
 			data=data, method='POST')
-	with urllib.request.urlopen(r) as f:
+	with urllib.request.urlopen(req) as f:
 		return f.read().decode('utf-8')
 
 def fetch_ro_course_ical(id):
+	# pylint: disable=import-outside-toplevel
 	from lxml import html
 	url = 'https://online.rwth-aachen.de/RWTHonline/pl/ui/%24ctx/wbTermin_List.wbLehrveranstaltung?pStpSpNr='+'%i'%(int(id))
 	req = urllib.request.urlopen(url)
@@ -131,6 +133,7 @@ def fetch_ro_course_ical(id):
 	return fetch_ro_event_ical(event_ids)
 
 def fetch_ro_course_events(item):
+	# pylint: disable=import-outside-toplevel
 	import icalendar
 	import pytz
 	localtz = pytz.timezone('Europe/Berlin')
@@ -151,29 +154,30 @@ def fetch_ro_course_events(item):
 			continue
 		if comp.get('STATUS') != 'CONFIRMED':
 			continue
-		e = {}
+		event = {}
 		place = str(comp.get('LOCATION', ''))
 		if place:
 			campus_room = place.split('(')[-1].split(')')[0]
 			dbplace = query('SELECT name FROM places WHERE campus_room = ?', campus_room)
 			if dbplace:
-				e['place'] = dbplace[0]['name']
+				event['place'] = dbplace[0]['name']
 			else:
-				e['place'] = 'Unbekannter Ort ('+place+')'
+				event['place'] = 'Unbekannter Ort ('+place+')'
 		else:
-			e['place'] = ''
-		e['time'] = comp['DTSTART'].dt.astimezone(localtz).replace(tzinfo=None)
-		e['duration'] = int((comp['DTEND'].dt - comp['DTSTART'].dt).seconds/60)
-		e['title'] = item['type']
-		events.append(e)
+			event['place'] = ''
+		event['time'] = comp['DTSTART'].dt.astimezone(localtz).replace(tzinfo=None)
+		event['duration'] = int((comp['DTEND'].dt - comp['DTSTART'].dt).seconds/60)
+		event['title'] = item['type']
+		events.append(event)
 	return events
 
 @app.route('/internal/import/<int:id>/now', methods=['GET', 'POST'])
 @mod_required
 def import_from(id):
+	# pylint: disable=too-many-branches
 	courses = query('SELECT * FROM courses WHERE id = ?', id)[0]
 	lectures = query('SELECT * FROM lectures WHERE course_id = ?', courses['id'])
-	import_campus = query('SELECT * FROM import_campus WHERE course_id = ?',id)
+	import_campus = query('SELECT * FROM import_campus WHERE course_id = ?', id)
 	events = []
 	try:
 		# if u have to port this to anything new, god be with you.
@@ -200,7 +204,7 @@ def import_from(id):
 				break
 		if (not unique) and (not exists):
 			newevents.append(i)
-	
+
 	# deleted events
 	deletedlectures = []
 	for i in lectures:
diff --git a/jobmanagement.py b/jobmanagement.py
index 9e6ccb3354c6a3da90b225081f80af6f308728aa..818fb095fbb0440c47b333b3a184d50a14fe3f09 100644
--- a/jobmanagement.py
+++ b/jobmanagement.py
@@ -1,9 +1,10 @@
-from server import modify, query, date_json_handler, sched_func, notify_admins
 from datetime import datetime, timedelta
 import traceback
 import json
 
-job_handlers = {}
+from server import modify, query, date_json_handler, sched_func, notify_admins
+
+job_handlers = {} #pylint: disable=invalid-name
 def job_handler(*types, state='finished'):
 	def wrapper(func):
 		for jobtype in types:
@@ -21,25 +22,25 @@ def job_handler_handle(id, state):
 	for func in job_handlers.get(type, {}).get(state, []):
 		try:
 			func(id, job['type'], json.loads(job['data']), state, json.loads(job['status']))
-		except Exception:
+		except Exception: #pylint: disable=broad-except
 			notify_admins('scheduler_exception', name=func.__name__, traceback=traceback.format_exc())
 			traceback.print_exc()
 
 @sched_func(10)
 def job_catch_broken():
 	# scheduled but never pinged
-	query('BEGIN')
+	modify("BEGIN")
 	query('UPDATE jobs SET state="ready" WHERE state="scheduled" and time_scheduled < ?', datetime.now() - timedelta(seconds=10))
 	try:
-		query('COMMIT')
-	except:
+		modify("COMMIT")
+	except: #pylint: disable=bare-except
 		pass
 	# no pings since 60s
-	query('BEGIN')
+	modify("BEGIN")
 	query('UPDATE jobs SET state="failed" WHERE state="running" and last_ping < ?', datetime.now() - timedelta(seconds=60))
 	try:
-		query('COMMIT')
-	except:
+		modify("COMMIT")
+	except: #pylint: disable=bare-except
 		pass
 
 def job_set_state(id, state):
diff --git a/jobs.py b/jobs.py
index aea699be2e09ed0328f3e238a976bf9d47a4a6f0..515b3ee02348b6d7a3b674e73797743754882475 100644
--- a/jobs.py
+++ b/jobs.py
@@ -1,21 +1,16 @@
-from server import *
 import json
 import random
+import math
 from time import sleep
 
+from server import *
+
 @app.route('/internal/jobs/overview')
 @register_navbar('Jobs', iconlib='fa', icon='suitcase', group='weitere')
 @mod_required
 def jobs_overview():
-	if 'page' in request.args:
-		page = max(0, int(request.args['page']))
-	else:
-		page = 0
-	if 'pagesize' in request.args:
-		pagesize = min(500, int(request.args['pagesize']))
-	else:
-		pagesize = 50
-
+	page = max(0, int(request.args.get('page', 0)))
+	pagesize = min(500, int(request.args.get('pagesize', 50)))
 	worker = query('SELECT * FROM worker ORDER BY last_ping DESC')
 
 	# get filter options
@@ -23,22 +18,36 @@ def jobs_overview():
 			'type': query('SELECT distinct type FROM jobs'),
 			'state': query('SELECT distinct state FROM jobs'),
 			'worker': query('SELECT distinct worker FROM jobs')}
-	
+
 	# parse filter
 	filter = {
-			'type': request.args.get('type','%'),
-			'state': request.args.get('state','failed'),
-			'worker': request.args.get('worker','%') }
+			'type': request.args.get('type', '%'),
+			'state': request.args.get('state', 'failed'),
+			'worker': request.args.get('worker', '%')}
 
-	pagecount = math.ceil(query('SELECT count(id) as count FROM jobs WHERE (type like ?) AND (worker like ? OR (worker IS NULL AND ? = "%")) AND (state like ?)', filter['type'], filter['worker'], filter['worker'], filter['state'])[0]['count']/pagesize)
-	jobs = query('SELECT * FROM jobs WHERE (type like ?) AND (worker like ? OR (worker IS NULL AND ? = "%")) AND (state like ?) ORDER BY `time_created` DESC LIMIT ? OFFSET ?', filter['type'], filter['worker'], filter['worker'], filter['state'], pagesize, page*pagesize)
-	active_streams = query('SELECT lectures.*, "course" AS sep, courses.*, "job" AS sep, jobs.* FROM lectures JOIN courses ON (courses.id = lectures.course_id) JOIN jobs ON (jobs.id = lectures.stream_job) WHERE lectures.stream_job')
+	pagecount = math.ceil(query('SELECT count(id) as count FROM jobs WHERE (type like ?) AND (worker like ? OR (worker IS NULL AND ? = "%")) AND (state like ?)',
+		filter['type'], filter['worker'], filter['worker'], filter['state'])[0]['count']/pagesize)
+	jobs = query('SELECT * FROM jobs \
+			WHERE (type like ?) AND (worker like ? OR (worker IS NULL AND ? = "%")) AND (state like ?) \
+			ORDER BY `time_created` DESC LIMIT ? OFFSET ?',
+			filter['type'], filter['worker'], filter['worker'], filter['state'], pagesize, page*pagesize)
+	active_streams = query('SELECT lectures.*, "course" AS sep, courses.*, "job" AS sep, jobs.* FROM lectures \
+			JOIN courses ON (courses.id = lectures.course_id) \
+			JOIN jobs ON (jobs.id = lectures.stream_job) WHERE lectures.stream_job')
 	for stream in active_streams:
 		try:
 			stream['destbase'] = json.loads((stream['job']['data'] or '{}')).get('destbase')
-		except:
+		except: #pylint: disable=bare-except
 			pass
-	return render_template('jobs_overview.html',worker=worker,jobs=jobs, filter_values=filter_values, filter=filter, page=page, pagesize=pagesize, pagecount=pagecount, active_streams=active_streams)
+	return render_template('jobs_overview.html',
+			worker=worker,
+			jobs=jobs,
+			filter_values=filter_values,
+			filter=filter,
+			page=page,
+			pagesize=pagesize,
+			pagecount=pagecount,
+			active_streams=active_streams)
 
 @app.route('/internal/jobs/action/<action>', methods=['GET', 'POST'])
 @app.route('/internal/jobs/action/<action>/<jobid>', methods=['GET', 'POST'])
@@ -50,31 +59,17 @@ def jobs_action(action, jobid=None):
 	elif action == 'retry_failed':
 		query('UPDATE jobs SET state = "ready", canceled = 0 WHERE state = "failed" AND (id = ? OR ? IS NULL)', jobid, jobid)
 	elif action == 'copy' and jobid:
-		query("INSERT INTO jobs (type, priority, queue, state, data, time_created) SELECT type, priority, queue, 'ready', data, ? FROM jobs where id = ?", datetime.now(), jobid)
+		query("INSERT INTO jobs (type, priority, queue, state, data, time_created) \
+				SELECT type, priority, queue, 'ready', data, ? FROM jobs where id = ?",
+				datetime.now(), jobid)
 	elif action == 'delete' and jobid:
 		query('UPDATE jobs SET state = "deleted" WHERE id = ?', jobid)
 	elif action == 'cancel' and jobid:
 		cancel_job(jobid)
 	return redirect(request.values.get('ref', url_for('jobs_overview')))
 
-def jobs_api_token_required(func):
-	@wraps(func)
-	def decorator(*args, **kwargs):
-		if 'apikey' in request.values:
-			token = request.values['apikey']
-		elif request.get_json() and ('apikey' in request.get_json()):
-			token = request.get_json()['apikey']
-		else:
-			token = None
-		
-		if not token == config.get('JOBS_API_KEY', [None]):
-			return 'Permission denied', 403
-		else:
-			return func(*args, **kwargs)
-	return decorator
-
 @app.route('/internal/jobs/api/job/<int:id>/ping', methods=['GET', 'POST'])
-@jobs_api_token_required
+@api_token_required('JOBS_API_KEY')
 def jobs_ping(id):
 	hostname = request.values['host']
 	status = json.dumps(json.loads(request.values['status']), default=date_json_handler)
@@ -87,11 +82,10 @@ def jobs_ping(id):
 	job = query('SELECT * FROM jobs WHERE id = ?', id, nlfix=False)[0]
 	if job['canceled']:
 		return 'Job canceled', 205
-	else:
-		return 'OK', 200
+	return 'OK', 200
 
 @app.route('/internal/jobs/api/worker/<hostname>/schedule', methods=['POST'])
-@jobs_api_token_required
+@api_token_required('JOBS_API_KEY')
 def jobs_schedule(hostname):
 	query('REPLACE INTO worker (hostname, last_ping) values (?, ?)', hostname, datetime.now())
 	hostdata = request.get_json()
@@ -99,19 +93,18 @@ def jobs_schedule(hostname):
 		return 'no hostdata sent', 400
 	job = None
 	tries = 0
-	jobtypes = hostdata['jobtypes'] if 'jobtypes' in hostdata else []
-	while (not job):
+	while not job:
 		try:
-			query("BEGIN")
+			modify("BEGIN")
 			for i in query('SELECT * FROM jobs WHERE state = "ready" ORDER BY priority DESC'):
 				if i['type'] in hostdata['jobtypes'] and i['queue'] in hostdata['queues']:
 					job = i
 					break
 			if not job:
 				return 'no jobs', 503
-			modify('UPDATE jobs SET state="scheduled", worker = ?, time_scheduled = ? WHERE id = ?', hostname, datetime.now(), job['id']) 
-			query("COMMIT")
-		except:
+			modify('UPDATE jobs SET state="scheduled", worker = ?, time_scheduled = ? WHERE id = ?', hostname, datetime.now(), job['id'])
+			modify("COMMIT")
+		except: #pylint: disable=bare-except
 			tries += 1
 			job = None
 			sleep(random.random())
@@ -127,4 +120,3 @@ def add_forward_job():
 	schedule_job('live_forward', {'src': request.values['src'],
 			'dest': request.values['dest'], 'format': 'flv'}, priority=9)
 	return redirect(request.values.get('ref', url_for('jobs_overview')))
-
diff --git a/l2pauth.py b/l2pauth.py
index 57d29dded8e29c745d9cf6d8f6e56bd368c19118..a975eb1a296b127d3e4c18552eb0c92a1b9d9692 100644
--- a/l2pauth.py
+++ b/l2pauth.py
@@ -1,6 +1,6 @@
-from server import *
 import requests
-from mail import notify_admins
+
+from server import *
 
 L2P_BASE = 'https://www3.elearning.rwth-aachen.de/_vti_bin/l2pservices/api.svc/v1/'
 OAUTH_BASE = 'https://oauth.campus.rwth-aachen.de/oauth2waitress/oauth2.svc/'
@@ -73,6 +73,5 @@ def finish_oauth():
 				session['moodle_courses'].append(str(course['id']))
 		else:
 			notify_admins('endpoint_exception', traceback="finish_oauth failed while getting moodle courses, data={}".format(str(data)))
-			raise
 	del session['oauthscope']
 	oauthget('token', refresh_token=token['refresh_token'], grant_type='invalidate')
diff --git a/ldap.py b/ldap.py
index 4ea1530ec74ca318c94d8f12625968553475558a..dae4a3f1a5e19d3b6ab2431c70781fb97dd573be 100644
--- a/ldap.py
+++ b/ldap.py
@@ -1,14 +1,16 @@
-from server import *
 import re
 
+from server import *
+
 LDAP_USERRE = re.compile(r'[^a-z0-9]')
 if 'LDAP_HOST' in config:
 	import ldap3
 
-	def ldapauth(user, password):
+	def ldapauth(user, password): # pylint: disable=function-redefined
 		user = LDAP_USERRE.sub(r'', user.lower())
 		try:
-			conn = ldap3.Connection(ldap3.Server(config['LDAP_HOST'], port=config['LDAP_PORT'], use_ssl=True), 'fsmpi\\%s'%user, password, auto_bind=True, check_names=False)
+			server = ldap3.Server(config['LDAP_HOST'], port=config['LDAP_PORT'], use_ssl=True)
+			conn = ldap3.Connection(server, 'fsmpi\\%s'%user, password, auto_bind=True, check_names=False)
 		except (ldap3.core.exceptions.LDAPBindError, ldap3.core.exceptions.LDAPPasswordIsMandatoryError):
 			return {}, []
 		conn.search("cn=users,dc=fsmpi,dc=rwth-aachen,dc=de", "(cn=%s)"%user, attributes=['memberOf', 'givenName', 'sn'])
@@ -18,13 +20,13 @@ if 'LDAP_HOST' in config:
 		return info, groups
 
 else:
-	notldap = {
-		'videoag':('videoag', ['fachschaft','videoag'], {'uid': 'videoag', 'givenName': 'Video', 'sn': 'Geier'}),
-		'gustav':('passwort', ['fachschaft'], {'uid': 'gustav', 'givenName': 'Gustav', 'sn': 'Geier'})
+	NOTLDAP = {
+		'videoag': ('videoag', ['fachschaft', 'videoag'], {'uid': 'videoag', 'givenName': 'Video', 'sn': 'Geier'}),
+		'gustav': ('passwort', ['fachschaft'], {'uid': 'gustav', 'givenName': 'Gustav', 'sn': 'Geier'})
 	}
 
-	def ldapauth(user, password):
+	def ldapauth(user, password): # pylint: disable=function-redefined
 		user = LDAP_USERRE.sub(r'', user.lower())
-		if config.get('DEBUG') and user in notldap and password == notldap[user][0]:
-			return notldap[user][2], notldap[user][1]
+		if config.get('DEBUG') and user in NOTLDAP and password == NOTLDAP[user][0]:
+			return NOTLDAP[user][2], NOTLDAP[user][1]
 		return {}, []
diff --git a/legacy.py b/legacy.py
index 14beb0fd934aa6c812ed38505cb5fdf9d0bd8c6c..e1c82a7a7a8d276d511063ccf6fea6af994c74ac 100644
--- a/legacy.py
+++ b/legacy.py
@@ -3,38 +3,38 @@ from server import *
 def legacy_index():
 	# handle legacy urls...
 	if 'course' in request.args:
-		return redirect(url_for('course', handle=request.args['course']),code=302)
+		return redirect(url_for('course', handle=request.args['course']), code=302)
 	if 'view' in request.args:
-		if (request.args['view'] == 'player') and ('lectureid' in request.args) :
+		if (request.args['view'] == 'player') and ('lectureid' in request.args):
 			courses = query('SELECT courses.handle FROM courses JOIN lectures ON courses.id = lectures.course_id WHERE lectures.id = ?', request.args['lectureid'])
 			if not courses:
 				return "Not found", 404
-			return redirect(url_for('lecture', course=courses[0]['handle'], id=request.args['lectureid']),code=302)
+			return redirect(url_for('lecture', course=courses[0]['handle'], id=request.args['lectureid']), code=302)
 		if request.args['view'] == 'faq':
-			return redirect(url_for('faq'),code=302)
+			return redirect(url_for('faq'), code=302)
 	return None
 
 @app.route('/site/')
 @app.route('/site/<string:phpfile>')
-def legacy(phpfile=None):
-	if phpfile=='embed.php' and ('lecture' in request.args):
+def legacy(phpfile=None): #pylint: disable=too-many-return-statements
+	if phpfile == 'embed.php' and ('lecture' in request.args):
 		courses = query('SELECT courses.handle FROM courses JOIN lectures ON courses.id = lectures.course_id WHERE lectures.id = ?', request.args['lecture'])
 		if not courses:
 			return render_endpoint('index', 'Diese Seite existiert nicht!'), 404
-		return redirect(url_for('embed', course=courses[0]['handle'], id=request.args['lecture']),code=302)
-	if phpfile=='embed.php' and ('vid' in request.args):
+		return redirect(url_for('embed', course=courses[0]['handle'], id=request.args['lecture']), code=302)
+	if phpfile == 'embed.php' and ('vid' in request.args):
 		lectures = query('SELECT lecture_id FROM videos WHERE id = ?', request.args['vid'])
 		if not lectures:
 			return render_endpoint('index', 'Dieses Videos existiert nicht!'), 404
 		courses = query('SELECT courses.handle FROM courses JOIN lectures ON courses.id = lectures.course_id WHERE lectures.id = ?', lectures[0]['lecture_id'])
 		if not courses:
 			return render_endpoint('index', 'Diese Seite existiert nicht!'), 404
-		return redirect(url_for('embed', course=courses[0]['handle'], id=lectures[0]['lecture_id']),code=302)
-	if phpfile=='feed.php' and ('all' in request.args):
-		return redirect(url_for('feed'),code=302)
-	if phpfile=='feed.php' and ('newcourses' in request.args):
-		return redirect(url_for('courses_feed'),code=302)
-	if phpfile=='feed.php':
-		return redirect(url_for('feed', handle=request.args.copy().popitem()[0]),code=302)
-	print("Unknown legacy url:",request.url)
-	return redirect(url_for('index'),code=302)
+		return redirect(url_for('embed', course=courses[0]['handle'], id=lectures[0]['lecture_id']), code=302)
+	if phpfile == 'feed.php' and ('all' in request.args):
+		return redirect(url_for('feed'), code=302)
+	if phpfile == 'feed.php' and ('newcourses' in request.args):
+		return redirect(url_for('courses_feed'), code=302)
+	if phpfile == 'feed.php':
+		return redirect(url_for('feed', handle=request.args.copy().popitem()[0]), code=302)
+	print("Unknown legacy url:", request.url)
+	return redirect(url_for('index'), code=302)
diff --git a/livestreams.py b/livestreams.py
index ed2b76fe6c509b873280f96b4482eedc61d04c37..241b804ea3adf9df9308f189c29fb254b0b8b76a 100644
--- a/livestreams.py
+++ b/livestreams.py
@@ -1,19 +1,23 @@
-from server import *
-import requests
 from xml.etree import ElementTree
 import random
 import string
+from ipaddress import ip_address, ip_network
+import json
+import requests
+
+from server import *
 
 @sched_func(120)
 def livestream_thumbnail():
 	livestreams = query('SELECT streams.lecture_id, streams.handle AS livehandle FROM streams WHERE streams.active')
 	lectures = query('SELECT * FROM lectures WHERE stream_job IS NOT NULL')
-	for v in genlive(livestreams)+genlive_new(lectures):
-		schedule_job('thumbnail', {'src': v['path'], 'filename': 'l_%i.jpg'%v['lecture_id']})
+	for stream in genlive(livestreams)+genlive_new(lectures):
+		schedule_job('thumbnail', {'src': stream['path'], 'filename': 'l_%i.jpg'%stream['lecture_id']})
 
 @app.route('/internal/streaming/legacy_auth', methods=['GET', 'POST'])
 @app.route('/internal/streaming/legacy_auth/<server>', methods=['GET', 'POST'])
 def streamauth_legacy(server=None):
+	# pylint: disable=too-many-branches,bare-except,chained-comparison
 	internal = False
 	if 'X-Real-IP' in request.headers:
 		for net in config.get('FSMPI_IP_RANGES', []):
@@ -26,7 +30,11 @@ def streamauth_legacy(server=None):
 	if request.values['call'] == 'publish':
 		if request.values['pass'] != 'caisoh8aht0wuSu':
 			return 'Forbidden', 403
-		matches = query("SELECT lectures.* FROM lectures JOIN courses ON lectures.course_id = courses.id WHERE courses.handle = ? ORDER BY lectures.time DESC", request.values['name'])
+		matches = query('''SELECT lectures.*
+			FROM lectures
+			JOIN courses ON lectures.course_id = courses.id
+			WHERE courses.handle = ?
+			ORDER BY lectures.time DESC''', request.values['name'])
 		now = datetime.now()
 		match = {'id': -1}
 		for lecture in matches:
@@ -59,22 +67,23 @@ def streamauth_legacy(server=None):
 	return 'OK', 200
 
 @job_handler('simple_live_transcode', state='failed')
-def restart_failed_live_transcode(id, type, data, state, status):
+def restart_failed_live_transcode(id, type, data, state, status): # pylint: disable=unused-argument
 	restart_job(id)
 
 @app.route('/internal/streaming')
 @register_navbar('Streaming', icon='broadcast-tower', iconlib='fa')
 @mod_required
 def streaming():
+	# pylint: disable=invalid-name
 	sources = query('SELECT * FROM live_sources WHERE NOT deleted')
 	for source in sources:
 		if not source['clientid']:
 			continue
-		r = requests.get('http://%s:8080/stats'%source['server'])
-		if r.status_code != 200:
+		req = requests.get('http://%s:8080/stats'%source['server'])
+		if req.status_code != 200:
 			continue
 		source['stat'] = {}
-		tree = ElementTree.fromstring(r.text)
+		tree = ElementTree.fromstring(req.text)
 		if not tree:
 			continue
 		s = tree.find("./server/application/[name='src']/live/stream/[name='%i']"%source['id'])
@@ -98,7 +107,16 @@ def gentoken():
 def streamrekey(id):
 	modify('UPDATE live_sources SET `key` = ? WHERE id = ? AND NOT deleted', gentoken(), id)
 	source = query('SELECT * FROM live_sources WHERE NOT deleted AND id = ?', id)[0]
-	flash('Der Streamkey von <strong>'+source['name']+'</strong> wurde neu generiert: <span><input readonly type="text" style="width: 15em" value="'+source['key']+'"></span><br>Trage diesen Streamkey zusammen mit einem der folgenden Streamingserver in die Streamingsoftware ein:<ul><li>'+config['STREAMING_SERVER']+'</li><li>'+config['BACKUP_STREAMING_SERVER']+'</li></ul>Insgesamt sollte die Streaming-URL z.B. so aussehen: <a href="'+config['STREAMING_SERVER']+source['key']+'">'+config['STREAMING_SERVER']+source['key']+'</a>')
+	flash('''Der Streamkey von <strong>{name}</strong> wurde neu generiert:
+			<span><input readonly type="text" style="width: 15em" value="{key}"></span><br>
+			Trage diesen Streamkey zusammen mit einem der folgenden Streamingserver in die Streamingsoftware ein:
+			<ul>
+				<li>{server}</li>
+				<li>{backup_server}</li>
+			</ul>Insgesamt sollte die Streaming-URL z.B. so aussehen:
+			<a href="{server}{key}">{server}{key}</a>'''.format(
+		name=source['name'], key=source['key'], server=config['STREAMING_SERVER'],
+		backup_server=config['BACKUP_STREAMING_SERVER']))
 	return redirect(url_for('streaming'))
 
 @app.route('/internal/streaming/drop/<int:id>')
@@ -122,36 +140,34 @@ def live_source_thumbnail():
 	for source in sources:
 		schedule_job('thumbnail', {'srcurl': 'rtmp://%s/src/%i'%(source['server'], source['id']), 'filename': 's_%i.jpg'%source['id']})
 
+def ip_in_networks(ip, networks):
+	for net in networks:
+		if ip_address(ip) in ip_network(net):
+			return True
+	return False
+
 @app.route('/internal/streaming/auth/<server>', methods=['GET', 'POST'])
 def streamauth(server):
-	internal = False
-	for net in config.get('FSMPI_IP_RANGES', []):
-		if ip_address(request.headers['X-Real-IP']) in ip_network(net):
-			internal = True
-	if not internal:
+	# pylint: disable=too-many-return-statements
+	if not ip_in_networks(request.headers['X-Real-IP'], config.get('FSMPI_IP_RANGES', [])):
 		return 'Forbidden', 403
+	# Sources publish their streams at rtmp://example.com/src/{key} and are
+	# then redirected to rtmp://example.com/src/{id} to hide the secret stream key
 	if request.values['call'] == 'publish':
 		sources = query('SELECT * FROM live_sources WHERE NOT deleted AND `key` = ?', request.values['name'])
 		if not sources:
 			return 'Not found', 404
-		modify('UPDATE live_sources SET server = ?, server_public = ?, clientid = ?, last_active = ?, preview_key = ? WHERE id = ?', server, request.args.get('public_ip', server), request.values['clientid'], datetime.now(), gentoken(), sources[0]['id'])
+		modify('UPDATE live_sources SET server = ?, server_public = ?, clientid = ?, last_active = ?, preview_key = ? WHERE id = ?',
+			server, request.args.get('public_ip', server), request.values['clientid'],
+			datetime.now(), gentoken(), sources[0]['id'])
 		live_source_thumbnail()
 		ret = Response('Redirect', 301, {'Location': '%i'%sources[0]['id']})
 		ret.autocorrect_location_header = False
 		return ret
-	if request.values['call'] == 'play':
-		source = (query('SELECT * FROM live_sources WHERE NOT deleted AND id = ?', request.values['name']) or [None])[0]
-		if not source:
-			return 'Not found', 404
-		for net in config.get('INTERNAL_IP_RANGES', []):
-			if ip_address(request.values['addr']) in ip_network(net):
-				return 'Ok', 200
-		if source['preview_key'] == request.values.get('preview_key'):
-			return 'Ok', 200
-		return 'Forbidden', 403
 	elif request.values['call'] == 'publish_done':
 		source = (query('SELECT * FROM live_sources WHERE server = ? AND clientid = ?', server, request.values['clientid']) or [None])[0]
-		modify('UPDATE live_sources SET server = NULL, clientid = NULL, preview_key = NULL, last_active = ? WHERE server = ? AND clientid = ?', datetime.now(), server, request.values['clientid'])
+		modify('UPDATE live_sources SET server = NULL, clientid = NULL, preview_key = NULL, last_active = ? WHERE server = ? AND clientid = ?',
+			datetime.now(), server, request.values['clientid'])
 		if not source:
 			return 'Ok', 200
 		for lecture in query('SELECT * FROM lectures WHERE stream_job IS NOT NULL'):
@@ -159,23 +175,64 @@ def streamauth(server):
 			if str(source['id']) in [str(settings.get('source1')), str(settings.get('source2'))]:
 				cancel_job(lecture['stream_job'])
 		return 'Ok', 200
+	elif request.values['call'] == 'play':
+		source = (query('SELECT * FROM live_sources WHERE NOT deleted AND id = ?', request.values['name']) or [None])[0]
+		if not source:
+			return 'Not found', 404
+		if ip_in_networks(request.values['addr'], config.get('INTERNAL_IP_RANGES', [])):
+			return 'Ok', 200
+		if source['preview_key'] == request.values.get('preview_key'):
+			return 'Ok', 200
+		return 'Forbidden', 403
 	return 'Bad request', 400
 
 def schedule_livestream(lecture_id):
-	def build_filter(l):
-		return ','.join(l) if l else None
-	server = 'rwth.video'
+	# pylint: disable=too-many-branches,too-many-statements
 	lecture = query('SELECT * FROM lectures WHERE id = ?', lecture_id)[0]
 	settings = json.loads(lecture['stream_settings'])
-	data = {'src1': {'afilter': [], 'vfilter': []}, 'src2': {'afilter': [], 'vfilter': []}, 'afilter': [], 'videoag_logo': int(bool(settings.get('video_showlogo'))), 'lecture_id': lecture['id']}
+	# Server that receives transcoded streams and generates HLS data, later
+	# (hopefully) overwritten with one of the source's ingestion servers to
+	# reduce the number of servers the stream's stability relies on
+	dest_server = 'rwth.video'
+	# Used by complex_live_transcode.c (ffworker) to open the sources and
+	# construct a ffmpeg filter graph <https://ffmpeg.org/ffmpeg-filters.html>:
+	#
+	# Audio graph
+	# src1 -> {src1.afilter} \
+	#                         amix -> {data.afilter} -> output
+	# src2 -> {src2.afilter} /
+	# Video graph
+	# src1 -> {src1.vfilter} \
+	#                         {vmix} -> scale=1920:1080 -> opt. logo overlay -> output
+	# src2 -> {src2.vfilter} /
+	data = {
+		'src1':
+		{
+			#'url': 'rtmp://...',
+			'afilter': [],
+			'vfilter': [],
+		},
+		'src2': {
+			#'url': 'rtmp://...',
+			'afilter': [],
+			'vfilter': [],
+		},
+		'afilter': [],
+		#'vmix': 'streamselect=map=0',
+		'videoag_logo': int(bool(settings.get('video_showlogo'))),
+		'lecture_id': lecture['id'],
+		#'destbase': 'rtmp://...'
+	}
+	# afilter/vfilter are lists here to simplify the code below and must be
+	# converted to a single filter expression afterwards.
 	src1 = (query('SELECT * FROM live_sources WHERE NOT deleted AND id = ?', settings.get('source1')) or [{}])[0]
 	src2 = (query('SELECT * FROM live_sources WHERE NOT deleted AND id = ?', settings.get('source2')) or [{}])[0]
-	for idx, obj in zip([1,2], [src1, src2]):
-		if obj:
-			server = obj['server']
-			data['src%i'%idx]['url'] = 'rtmp://%s/src/%i'%(obj['server'], obj['id'])
-			if not obj['clientid']:
-				flash('Quelle „%s“ ist nicht aktiv!'%obj['name'])
+	for idx, src in zip([1, 2], [src1, src2]):
+		if src:
+			dest_server = src['server']
+			data['src%i'%idx]['url'] = 'rtmp://%s/src/%i'%(src['server'], src['id'])
+			if not src['clientid']:
+				flash('Quelle „%s“ ist nicht aktiv!'%src['name'])
 				return None
 		if settings.get('source%i_deinterlace'%idx):
 			data['src%i'%idx]['vfilter'].append('yadif')
@@ -191,7 +248,8 @@ def schedule_livestream(lecture_id):
 		elif mode == 'off':
 			data['src%i'%idx]['afilter'].append('pan=mono|c0=0*c0')
 		else:
-			raise(Exception())
+			raise Exception()
+	data['destbase'] = 'rtmp://%s/hls/%i'%(dest_server, lecture['id'])
 	mode = settings.get('videomode')
 	if mode == '1':
 		data['vmix'] = 'streamselect=map=0'
@@ -211,12 +269,15 @@ def schedule_livestream(lecture_id):
 		data['vmix'] = 'hstack,pad=1920:1080:0:270'
 	if settings.get('audio_normalize'):
 		data['afilter'].append('loudnorm')
+	# Filter setup done, now lists of ffmpeg filter expressions must be
+	# converted to single expressions
+	def build_filter(exprs):
+		return ','.join(exprs) if exprs else None
 	data['afilter'] = build_filter(data['afilter'])
 	data['src1']['afilter'] = build_filter(data['src1']['afilter'])
 	data['src1']['vfilter'] = build_filter(data['src1']['vfilter'])
 	data['src2']['afilter'] = build_filter(data['src2']['afilter'])
 	data['src2']['vfilter'] = build_filter(data['src2']['vfilter'])
-	data['destbase'] = 'rtmp://%s/hls/%i'%(server, lecture['id'])
 	if lecture['stream_job']:
 		flash('Stream läuft bereits!')
 		return None
@@ -229,12 +290,12 @@ def schedule_livestream(lecture_id):
 	return job_id
 
 @job_handler('complex_live_transcode', state='failed')
-def restart_failed_complex_live_transcode(id, type, data, state, status):
+def restart_failed_complex_live_transcode(id, type, data, state, status): # pylint: disable=unused-argument
 	restart_job(id)
 
 @job_handler('complex_live_transcode', state='failed')
 @job_handler('complex_live_transcode', state='finished')
-def cleanup_after_complex_live_transcode_ended(id, type, data, state, status):
+def cleanup_after_complex_live_transcode_ended(id, type, data, state, status): # pylint: disable=unused-argument
 	job = query('SELECT * FROM jobs WHERE id = ?', id, nlfix=False)[0]
 	if state == 'finished' or (state == 'failed' and job['canceled']):
 		modify('UPDATE lectures_data SET stream_job = NULL WHERE stream_job = ?', id)
diff --git a/mail.py b/mail.py
index a464002cb83424ea5d6eea94d743b2f102e519fa..136714f6a365a7aa46e75c53ac068eaa9f55a36b 100644
--- a/mail.py
+++ b/mail.py
@@ -1,14 +1,14 @@
-from server import *
-
 from email.message import EmailMessage
 import smtplib
 import traceback
 
+from server import *
+
 def send_message(msgtype, recipients, **kwargs):
 	msg = EmailMessage()
 	msg['From'] = config['MAIL_FROM']
 	msg['To'] = ', '.join([r.replace(',', '') for r in recipients])
-	cc = kwargs.pop('cc', [])
+	cc = kwargs.pop('cc', []) #pylint: disable=invalid-name
 	if cc:
 		msg['Cc'] = ', '.join([r.replace(',', '') for r in cc])
 	try:
@@ -16,13 +16,14 @@ def send_message(msgtype, recipients, **kwargs):
 		msg.set_content(render_template('mails/'+msgtype+'.body', **kwargs))
 		if not config.get('MAIL_SERVER'):
 			return
-		s = smtplib.SMTP(config['MAIL_SERVER'])
+		mailserver = smtplib.SMTP(config['MAIL_SERVER'])
 		if config.get('MAIL_ADDRESS_OVERWRITE'):
-			s.send_message(msg, to_addrs=[config['MAIL_ADDRESS_OVERWRITE']])
+			mailserver.send_message(msg, to_addrs=[config['MAIL_ADDRESS_OVERWRITE']])
 		else:
-			s.send_message(msg)
-		s.quit()
-	except:
+			mailserver.send_message(msg)
+		mailserver.quit()
+	except: #pylint: disable=bare-except
+		# we must not raise an exception here, otherwise we would send another mail, rinse and repeat
 		traceback.print_exc()
 
 def notify_users(msgtype, uids, **kwargs):
@@ -41,14 +42,14 @@ def notify_users(msgtype, uids, **kwargs):
 						config['MAIL_SUFFIX']))
 		else:
 			recipients.append('%s@%s'%(user[0]['fsacc'], config['MAIL_SUFFIX']))
-	cc = kwargs.get('cc', [])
+	cc = kwargs.get('cc', []) #pylint: disable=invalid-name
 	if kwargs.pop('importend', False):
 		cc.append(config['MAIL_DEFAULT'])
 	if kwargs.pop('notify_admins', False):
 		cc.append(config['MAIL_ADMINS'])
 	if not recipients:
 		recipients = cc
-		cc = []
+		cc = [] #pylint: disable=invalid-name
 	if not recipients:
 		return
 	kwargs['cc'] = cc
@@ -64,7 +65,8 @@ def notify_mods(msgtype, course_id, **kwargs):
 def notify_admins(msgtype, **kwargs):
 	try:
 		send_message(msgtype, [config['MAIL_ADMINS']], **kwargs)
-	except:
+	except: #pylint: disable=bare-except
+		# we must not raise an exception here, otherwise we would send another mail, rinse and repeat
 		traceback.print_exc()
 
 @app.route('/internal/user/<int:user>/notifications')
@@ -72,4 +74,3 @@ def notify_admins(msgtype, **kwargs):
 @mod_required
 def user_notifications(user):
 	return render_template('notifications.html', user=query('SELECT * FROM users WHERE id = ?', user)[0])
-
diff --git a/meetings.py b/meetings.py
index f5b0b84f21b3210c3426921c221e9d237c0d6141..8d3a155098c3c4c0f9a13e27651898379acab9bf 100644
--- a/meetings.py
+++ b/meetings.py
@@ -1,7 +1,8 @@
-from server import *
 import icalendar
 import requests
 
+from server import *
+
 def get_next_meeting():
 	ical = requests.get(config['ICAL_URL']).content
 	events = icalendar.Calendar.from_ical(ical).walk('VEVENT')
@@ -11,21 +12,20 @@ def get_next_meeting():
 	for event in events:
 		try:
 			start = event['DTSTART'].dt.date()
-			if 'VIDEO' != event['SUMMARY'].upper() or start < now or start > now+delta:
+			if event['SUMMARY'].upper() != 'VIDEO' or start < now or start > now+delta:
 				continue
 			meetings.append(event)
-		except:
+		except KeyError:
 			pass
 	if not meetings:
-		return
+		return None, None
 	event = sorted(meetings, key=lambda e: e['DTSTART'].dt)[0]
 	return str(event['UID']), event['DTSTART'].dt.replace(tzinfo=None)
 
 @sched_func(60*60)
 def update_meeting():
-	try:
-		uid, start = get_next_meeting()
-	except:
+	uid, start = get_next_meeting()
+	if uid is None:
 		return
 	text = 'Die nächste Video AG-Sitzung findet am %s ab %s Uhr in den Räumlichkeiten der Fachschaft im Augustinerbach 2a statt.'%(
 			human_date(start), human_time(start))
diff --git a/profiling.py b/profiling.py
index 9b6e1eed7aa3c3cad8af39bae8a60544fa1c9edf..e89c3a175ba3dbaaa79a24e25a5d8ecfc62b4d13 100755
--- a/profiling.py
+++ b/profiling.py
@@ -1,6 +1,9 @@
 #!/usr/bin/python3
-from werkzeug.contrib.profiler import ProfilerMiddleware
+try:
+	from werkzeug.contrib.profiler import ProfilerMiddleware
+except ImportError:
+	from werkzeug.middleware.profiler import ProfilerMiddleware
+
 from server import app
 app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
-app.run(debug = True)
-
+app.run(debug=True)
diff --git a/runTests.py b/run_tests.py
similarity index 77%
rename from runTests.py
rename to run_tests.py
index b265725ac2931b9df833c23587aece770ee5aa0b..92a81ab300e57d97b8de33d9bdc3c62ac4274a73 100755
--- a/runTests.py
+++ b/run_tests.py
@@ -3,17 +3,16 @@ import unittest
 import os
 import server
 
-def setUp():
+def set_up():
 	server.app.testing = True
 
-def tearDown():
+def tear_down():
 	os.unlink(server.app.config['SQLITE_DB'])
 
 if __name__ == '__main__':
-	setUp()
+	set_up()
 	try:
-		suite = unittest.defaultTestLoader.discover('./tests/', pattern="*")
+		suite = unittest.defaultTestLoader.discover('./tests/', pattern="*") #pylint: disable=invalid-name
 		unittest.TextTestRunner(verbosity=2, failfast=True).run(suite)
 	finally:
-		tearDown()
-
+		tear_down()
diff --git a/scheduler.py b/scheduler.py
index 59326d271abe1e1dea3f3e1363c2423f02a037cf..62bce6c88926c8b65e983fe0b1006700c03b610b 100644
--- a/scheduler.py
+++ b/scheduler.py
@@ -1,17 +1,20 @@
-from server import *
 import threading
 import sched
 from time import sleep
 
-scheduler = sched.scheduler()
+from server import *
+
+scheduler = sched.scheduler() # pylint: disable=invalid-name
 def run_scheduler():
 	sleep(1) # UWSGI does weird things on startup
 	while True:
 		scheduler.run()
 		sleep(10)
 
-def sched_func(delay, priority=0, firstdelay=None, args=[], kargs={}):
-	if firstdelay == None:
+def sched_func(delay, priority=0, firstdelay=None, args=None, kargs=None):
+	args = args or []
+	kargs = kargs or {}
+	if firstdelay is None:
 		firstdelay = random.randint(1, 120)
 	def wrapper(func):
 		def sched_wrapper():
@@ -20,7 +23,7 @@ def sched_func(delay, priority=0, firstdelay=None, args=[], kargs={}):
 					if config.get('DEBUG', False):
 						print("Scheduler: started {} (frequency 1/{}s)".format(func.__name__, delay))
 					func(*args, **kargs)
-				except Exception:
+				except Exception: # pylint: disable=broad-except
 					traceback.print_exc()
 					notify_admins('scheduler_exception', name=func.__name__,
 							traceback=traceback.format_exc())
diff --git a/server.py b/server.py
index 6e969ee95f51423e13f3a72c5273be5f54d68902..44be9b9167f5320ecbda3e500646031c74b4e2b0 100644
--- a/server.py
+++ b/server.py
@@ -1,32 +1,27 @@
-from flask import Flask, g, request, url_for, redirect, session, render_template, flash, Response, make_response
-from werkzeug.routing import Rule
 from functools import wraps
-from datetime import date, timedelta, datetime, time, MINYEAR
+from datetime import date, timedelta, datetime, time
 import os
 import sys
-import hashlib
 import random
 import traceback
 import string
-from socket import gethostname
-from ipaddress import ip_address, ip_network
-import math
 import locale
-import base64
-import json
 import urllib
 
+from flask import Flask, request, url_for, redirect, session, render_template, flash, Response, make_response
+from werkzeug.routing import Rule
+
 locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
 
 app = Flask(__name__)
 
 config = app.config
 config.from_pyfile('config.py.example', silent=True)
-if sys.argv[0].endswith('run.py'): 
+if sys.argv[0].endswith('run.py'):
 	config['SQLITE_INIT_DATA'] = True
 	config['DEBUG'] = True
 config.from_pyfile('config.py', silent=True)
-if sys.argv[0].endswith('runTests.py'):
+if sys.argv[0].endswith('run_tests.py'):
 	print('running in test mode')
 	import tempfile
 	# ensure we always use a clean sqlite db for tests
@@ -44,8 +39,7 @@ if config['DEBUG']:
 if not config.get('SECRET_KEY', None):
 	config['SECRET_KEY'] = os.urandom(24)
 
-mod_endpoints = []
-
+mod_endpoints = [] #pylint: disable=invalid-name
 def mod_required(func):
 	mod_endpoints.append(func.__name__)
 	@wraps(func)
@@ -71,17 +65,18 @@ def evalperm(perms):
 	if vperms:
 		return vperms
 	elif lperms:
-	 	return lperms
+		return lperms
 	elif cperms:
 		return cperms
 	return [{'type': 'public'}]
 
-
+#pylint: disable=wrong-import-position
 from db import query, modify, show, searchquery
 from template_helper import *
-from mail import notify_mods, notify_admins
+from mail import notify_mods, notify_admins #pylint: disable=unused-import
 from ldap import ldapauth
 from scheduler import sched_func
+#pylint: enable=wrong-import-position
 
 def render_endpoint(endpoint, flashtext=None, **kargs):
 	if flashtext:
@@ -106,12 +101,12 @@ def handle_errors(endpoint, text, code, *errors, **epargs):
 
 @app.errorhandler(404)
 @app.route('/invalidpath')
-def handle_not_found(e=None):
+def handle_not_found(e=None): #pylint: disable=unused-argument
 	return render_endpoint('index', 'Diese Seite existiert nicht!'), 404
 
 @app.errorhandler(500)
 @app.errorhandler(Exception)
-def handle_internal_error(e):
+def handle_internal_error(e): #pylint: disable=unused-argument
 	traceback.print_exc()
 	notify_admins('endpoint_exception', traceback=traceback.format_exc())
 	return render_template('500.html', online=True), 500
@@ -145,7 +140,7 @@ def genlive_new(lectures):
 				'file_size': 0, 'formats': hls_format, 'lecture_id': lecture['id']})
 	return res
 
-from legacy import legacy_index
+from legacy import legacy_index #pylint: disable=wrong-import-position
 
 @app.route('/')
 @register_navbar('Home', icon='home')
@@ -166,7 +161,7 @@ def index():
 		ORDER BY time ASC LIMIT 30''', start, end, ismod())
 	for i in upcomming:
 		i['date'] = i['time'].date()
-	latestvideos=query('''
+	latestvideos = query('''
 		SELECT lectures.*, "course" AS sep, courses.*
 		FROM lectures
 		LEFT JOIN videos ON (videos.lecture_id = lectures.id)
@@ -174,7 +169,7 @@ def index():
 		WHERE (? OR (courses.visible AND courses.listed AND lectures.visible AND videos.visible))
 		GROUP BY videos.lecture_id
 		ORDER BY MAX(videos.time_created) DESC
-		LIMIT 6	''',ismod())
+		LIMIT 6	''', ismod())
 	livestreams = query('''SELECT streams.handle AS livehandle, lectures.*, "course" AS sep, courses.*
 		FROM streams
 		JOIN lectures ON lectures.id = streams.lecture_id
@@ -217,7 +212,8 @@ def index():
 def courses():
 	courses = query('SELECT * FROM courses WHERE (? OR (visible AND listed)) ORDER BY lower(semester), lower(title)', ismod())
 	chapters = {}
-	for i in query('SELECT lectures.course_id AS id, COUNT(chapters.id) AS c FROM chapters JOIN lectures ON chapters.lecture_id = lectures.id WHERE NOT chapters.visible AND NOT chapters.deleted GROUP BY lectures.course_id'):
+	for i in query('SELECT lectures.course_id AS id, COUNT(chapters.id) AS c FROM chapters \
+			JOIN lectures ON chapters.lecture_id = lectures.id WHERE NOT chapters.visible AND NOT chapters.deleted GROUP BY lectures.course_id'):
 		chapters[i['id']] = i['c']
 	for course in courses:
 		course['chapter_count'] = chapters.get(course['id'], 0)
@@ -237,9 +233,13 @@ def course(id=None, handle=None):
 	else:
 		course = query('SELECT * FROM courses WHERE handle = ? AND (? OR visible)', handle, ismod())[0]
 	course['perm'] = query('SELECT * FROM perm WHERE (NOT perm.deleted) AND course_id = ? ORDER BY type', course['id'])
-	perms = query('SELECT perm.* FROM perm JOIN lectures ON (perm.lecture_id = lectures.id) WHERE (NOT perm.deleted) AND lectures.course_id = ? ORDER BY perm.type', course['id'])
+	perms = query('SELECT perm.* FROM perm JOIN lectures ON (perm.lecture_id = lectures.id)\
+			WHERE (NOT perm.deleted) AND lectures.course_id = ? ORDER BY perm.type', course['id'])
 	chapters = {}
-	for i in query('SELECT lectures.id AS id, COUNT(chapters.id) AS c FROM chapters JOIN lectures ON chapters.lecture_id = lectures.id WHERE lectures.course_id = ? AND NOT chapters.visible AND NOT chapters.deleted GROUP BY chapters.lecture_id;', course['id']):
+	for i in query('SELECT lectures.id AS id, COUNT(chapters.id) AS c FROM chapters \
+			JOIN lectures ON chapters.lecture_id = lectures.id \
+			WHERE lectures.course_id = ? AND NOT chapters.visible AND NOT chapters.deleted \
+			GROUP BY chapters.lecture_id;', course['id']):
 		chapters[i['id']] = i['c']
 	lectures = query('SELECT * FROM lectures WHERE course_id = ? AND (? OR visible) ORDER BY time, duration DESC', course['id'], ismod())
 	for lecture in lectures:
@@ -269,7 +269,9 @@ def course(id=None, handle=None):
 	videos += genlive_new(lectures)
 	chapters = []
 	if course['coursechapters']:
-		chapters = query('SELECT chapters.* FROM chapters JOIN lectures ON lectures.id = chapters.lecture_id WHERE lectures.course_id = ? AND NOT chapters.deleted AND chapters.visible ORDER BY time ASC', course['id'])
+		chapters = query('SELECT chapters.* FROM chapters \
+				JOIN lectures ON lectures.id = chapters.lecture_id \
+				WHERE lectures.course_id = ? AND NOT chapters.deleted AND chapters.visible ORDER BY time ASC', course['id'])
 	responsible = query('''SELECT users.*, responsible.course_id AS responsible
 			FROM users
 			LEFT JOIN responsible ON (responsible.user_id = users.id AND responsible.course_id = ?)
@@ -288,7 +290,7 @@ def faq():
 @app.route('/<course>/<int:id>/embed', endpoint='embed')
 @app.route('/<int:courseid>/<int:id>/embed', endpoint='embed')
 @handle_errors('course', 'Diese Vorlesung existiert nicht!', 404, IndexError)
-def lecture(id, course=None, courseid=None):
+def lecture(id, course=None, courseid=None): #pylint: disable=unused-argument,too-many-branches
 	lecture = query('SELECT * FROM lectures WHERE id = ? AND (? OR visible)', id, ismod())[0]
 	videos = query('''
 			SELECT videos.*, (videos.downloadable AND courses.downloadable) as downloadable, "formats" AS sep, formats.*
@@ -329,27 +331,49 @@ def lecture(id, course=None, courseid=None):
 			flash(text+'. <a target="_blank" class="reloadonclose" href="'+url_for('start_rwthauth')+'">Hier authorisieren</a>.', category='player')
 		elif mode == 'l2p':
 			if 'l2p_courses' in session:
-				flash(text+'. Du bist kein Teilnehmer des L2P-Kurses! <a target="_blank" class="reloadonclose" href="'+url_for('start_l2pauth')+'">Kurse aktualisieren</a>.', category='player')
+				flash(text+'. Du bist kein Teilnehmer des L2P-Kurses! \
+						<a target="_blank" class="reloadonclose" href="'+url_for('start_l2pauth')+'">Kurse aktualisieren</a>.', category='player')
 			else:
 				flash(text+'. <a target="_blank" class="reloadonclose" href="'+url_for('start_l2pauth')+'">Hier authorisieren</a>.', category='player')
 		elif mode == 'moodle':
 			if 'moodle_courses' in session:
-				flash(text+'. Du bist kein Teilnehmer des Moodle-Kurses! <a target="_blank" class="reloadonclose" href="'+url_for('start_moodleauth')+'">Kurse aktualisieren</a>.', category='player')
+				flash(text+'. Du bist kein Teilnehmer des Moodle-Kurses! \
+						<a target="_blank" class="reloadonclose" href="'+url_for('start_moodleauth')+'">Kurse aktualisieren</a>.', category='player')
 			else:
 				flash(text+'. <a target="_blank" class="reloadonclose" href="'+url_for('start_moodleauth')+'">Hier authorisieren</a>.', category='player')
 		else:
 			flash(text+'.', category='player')
-	return render_template('embed.html' if request.endpoint == 'embed' else 'lecture.html', course=courses[0], lecture=lecture, videos=videos, chapters=chapters, seek=request.args.get('t'))
+	return render_template('embed.html' if request.endpoint == 'embed' else 'lecture.html',
+			course=courses[0], lecture=lecture, videos=videos, chapters=chapters, seek=request.args.get('t'))
 
 
 @app.route('/search')
 def search():
 	if 'q' not in request.args:
 		return redirect(url_for('index'))
-	q = request.args['q']
-	courses = searchquery(q, '*', ['title', 'short', 'organizer', 'subject', 'description'],
+	searchtext = request.args['q']
+	courses = searchquery(searchtext, '*', ['title', 'short', 'organizer', 'subject', 'description'],
 			'courses', 'WHERE (? OR (visible AND listed)) GROUP BY id ORDER BY _score DESC, semester DESC LIMIT 20', ismod())
-	lectures = searchquery(q, 'lectures.*, courses.visible AS coursevisible, courses.listed, courses.id AS courses_id, courses.visible AS courses_visible, courses.listed AS courses_listed, courses.title AS courses_title, courses.short AS courses_short, courses.handle AS courses_handle, courses.organizer AS courses_organizer, courses.subject AS courses_subject, courses.credits AS courses_credits, courses.created_by AS courses_created_by, courses.time_created AS courses_time_created, courses.time_updated AS courses_time_updated, courses.semester AS courses_semester, courses.downloadable AS courses_downloadable, courses.embedinvisible AS courses_embedinvisible, courses.description AS courses_description, courses.internal AS courses_internal',
+	lectures = searchquery(searchtext, 'lectures.*, \
+			courses.visible AS coursevisible, \
+			courses.listed, \
+			courses.id AS courses_id, \
+			courses.visible AS courses_visible, \
+			courses.listed AS courses_listed, \
+			courses.title AS courses_title, \
+			courses.short AS courses_short, \
+			courses.handle AS courses_handle, \
+			courses.organizer AS courses_organizer, \
+			courses.subject AS courses_subject, \
+			courses.credits AS courses_credits, \
+			courses.created_by AS courses_created_by, \
+			courses.time_created AS courses_time_created, \
+			courses.time_updated AS courses_time_updated, \
+			courses.semester AS courses_semester, \
+			courses.downloadable AS courses_downloadable, \
+			courses.embedinvisible AS courses_embedinvisible, \
+			courses.description AS courses_description, \
+			courses.internal AS courses_internal',
 			['lectures.title', 'lectures.comment', 'lectures.speaker', 'courses.short'],
 			'lectures LEFT JOIN courses on (courses.id = lectures.course_id)',
 			'WHERE (? OR (coursevisible AND listed AND visible)) GROUP BY id ORDER BY _score DESC, time DESC LIMIT 30', ismod())
@@ -358,7 +382,7 @@ def search():
 		for key in lecture:
 			if key.startswith('courses_'):
 				lecture['course'][key[8:]] = lecture[key]
-	return render_template('search.html', searchtext=request.args['q'], courses=courses, lectures=lectures)
+	return render_template('search.html', searchtext=searchtext, courses=courses, lectures=lectures)
 
 def check_mod(user, groups):
 	if not user:
@@ -391,9 +415,9 @@ def login():
 def logout():
 	session.pop('user', None)
 	return redirect(request.values.get('ref', url_for('index')))
-
+# For use with nginx auth_request
 @app.route('/internal/auth')
-def auth(): # For use with nginx auth_request
+def auth(): #pylint: disable=too-many-branches
 	if 'X-Original-Uri' not in request.headers:
 		return 'Internal Server Error', 500
 	url = urllib.parse.unquote(request.headers['X-Original-Uri']).lstrip(config['VIDEOPREFIX'])
@@ -442,12 +466,14 @@ def auth(): # For use with nginx auth_request
 	if checkperm(perms, username=username, password=password):
 		try:
 			if not url.startswith('pub/hls/'):
-				modify('INSERT INTO log (id, `time`, `date`, video, source) VALUES (?, ?, ?, ?, 1)', cookie, datetime.now(), datetime.combine(date.today(), time()), perms[0]['vid'])
+				modify('INSERT INTO log (id, `time`, `date`, video, source) VALUES (?, ?, ?, ?, 1)',
+						cookie, datetime.now(), datetime.combine(date.today(), time()), perms[0]['vid'])
 			elif url.endswith('.ts'):
 				fmt = url.split('_')[-1].split('-')[0]
 				seg = url.split('.')[0].split('-')[-1]
-				modify('INSERT INTO hlslog (id, `time`, segment, lecture, handle, format) VALUES (?, ?, ?, ?, ?, ?)', cookie, datetime.now(), seg, perms[0]['lecture'], handle, fmt)
-		except:
+				modify('INSERT INTO hlslog (id, `time`, segment, lecture, handle, format) VALUES (?, ?, ?, ?, ?, ?)',
+						cookie, datetime.now(), seg, perms[0]['lecture'], handle, fmt)
+		except: #pylint: disable=bare-except
 			pass
 		r = make_response('OK', 200)
 		r.set_cookie('tracking', str(cookie), max_age=2147483647) # Many many years
@@ -467,19 +493,17 @@ def files(filename):
 
 @app.route('/sitemap.xml')
 def sitemap():
-	pages=[]
+	pages = []
 	# static pages
 	for rule in app.url_map.iter_rules():
-		if 'GET' in rule.methods and len(rule.arguments)==0:
+		if 'GET' in rule.methods and len(rule.arguments) == 0:
 			if rule.endpoint not in mod_endpoints:
 				pages.append([rule.rule])
 	for i in query('select * from courses where visible and listed'):
-		pages.append([url_for('course',handle=i['handle'])])
-		for j in query('select * from lectures where (course_id = ? and visible)',i['id']):
-			pages.append([url_for('lecture',course=i['handle'],id=j['id'])])
-
-
-	return Response(render_template('sitemap.xml', pages=pages), 200, {'Content-Type': 'application/atom+xml'} )
+		pages.append([url_for('course', handle=i['handle'])])
+		for j in query('select * from lectures where (course_id = ? and visible)', i['id']):
+			pages.append([url_for('lecture', course=i['handle'], id=j['id'])])
+	return Response(render_template('sitemap.xml', pages=pages), 200, {'Content-Type': 'application/atom+xml'})
 
 @app.route('/internal/dbstatus')
 @register_navbar('DB-Status', icon='ok', group='weitere')
@@ -493,14 +517,17 @@ def dbstatus():
 		try:
 			for _host in show('SHOW VARIABLES LIKE "wsrep_cluster_address"', host=host)['wsrep_cluster_address'][len('gcomm://'):].split(','):
 				hosts.add(_host)
-		except:
+		except: #pylint: disable=bare-except
 			pass
 	for host in sorted(list(hosts)):
 		try:
 			status[host] = show('SHOW GLOBAL STATUS LIKE "wsrep%"', host=host)
 			variables[host] = show('SHOW GLOBAL VARIABLES LIKE "wsrep%"', host=host)
-		except:
-			status[host] = {'wsrep_cluster_state_uuid': '', 'wsrep_local_state_comment': 'Not reachable', 'wsrep_cluster_conf_id': '0', 'wsrep_cluster_status': 'Unknown'}
+		except: #pylint: disable=bare-except
+			status[host] = {'wsrep_cluster_state_uuid': '',
+					'wsrep_local_state_comment': 'Not reachable',
+					'wsrep_cluster_conf_id': '0',
+					'wsrep_cluster_status': 'Unknown'}
 			variables[host] = {'wsrep_node_name': host, 'wsrep_cluster_name': 'unknown'}
 		cluster = variables[host]['wsrep_cluster_name']+'-'+status[host]['wsrep_cluster_conf_id']
 		if cluster not in clusters:
@@ -511,8 +538,25 @@ def dbstatus():
 def date_json_handler(obj):
 	return obj.isoformat() if hasattr(obj, 'isoformat') else obj
 
-from edit import edit_handler
-from jobmanagement import job_handler, job_handler_handle, job_set_state, schedule_job, cancel_job, restart_job
+def api_token_required(config_key):
+	def wrapper(func):
+		@wraps(func)
+		def decorator(*args, **kwargs):
+			if 'apikey' in request.values:
+				token = request.values['apikey']
+			elif request.get_json() and ('apikey' in request.get_json()):
+				token = request.get_json()['apikey']
+			else:
+				token = None
+			if not token == config.get(config_key, [None]):
+				return 'Permission denied', 403
+			else:
+				return func(*args, **kwargs)
+		return decorator
+	return wrapper
+
+#pylint: disable=wrong-import-position
+from jobmanagement import job_handler, job_handler_handle, job_set_state, schedule_job, cancel_job, restart_job #pylint: disable=unused-import
 import feeds
 import importer
 import stats
@@ -527,3 +571,4 @@ import livestreams
 import encoding
 import cutprogress
 import jobs
+#pylint: enable=wrong-import-position
diff --git a/sorter.py b/sorter.py
index 3da37e4f34bb3d9a208aa846e1478bc697a6289d..690f5b32bf62ef5aa14a800a234e217b985dc53c 100644
--- a/sorter.py
+++ b/sorter.py
@@ -1,12 +1,13 @@
-from server import *
 import traceback
 import os.path
 
+from server import *
+
 @app.route('/internal/sort/log')
 @register_navbar('Sortierlog', icon='sort-by-attributes-alt', group='weitere')
 @mod_required
 def sort_log():
-	return render_template('sortlog.html',sortlog=query('''
+	return render_template('sortlog.html', sortlog=query('''
 			SELECT 
 				sortlog.*,
 				lectures.id as lecture_id,
@@ -18,16 +19,16 @@ def sort_log():
 			JOIN courses ON courses.id = lectures.course_id 
 			ORDER BY sortlog.`when` DESC
 			LIMIT 50
-		'''),sorterrorlog=query('SELECT * FROM sorterrorlog ORDER BY sorterrorlog.`when` DESC'))
+		'''), sorterrorlog=query('SELECT * FROM sorterrorlog ORDER BY sorterrorlog.`when` DESC'))
 
 def to_ascii(inputstring):
 	asciistring = inputstring
 	for charset in [('ä', 'ae'), ('ö', 'oe'), ('ü', 'ue'), ('ß', 'ss')]:
-		asciistring = asciistring.replace(charset[0],charset[1])
+		asciistring = asciistring.replace(charset[0], charset[1])
 	return asciistring
 
 @job_handler('probe', 'remux', 'transcode')
-def update_video_metadata(jobid, jobtype, data, state, status):
+def update_video_metadata(jobid, jobtype, data, state, status): #pylint: disable=unused-argument
 	if 'video_id' not in data:
 		return
 	if jobtype not in ['remux', 'transcode']:
@@ -54,9 +55,9 @@ def add_thumbnail_job():
 	schedule_thumbnail(int(request.values['lectureid']))
 	return redirect(request.values.get('ref', url_for('jobs_overview')))
 
-def insert_video(lectureid, dbfilepath, fileformatid, hash="", filesize=-1, duration=-1, sourceid=None):
+def insert_video(lectureid, dbfilepath, fileformatid, hash="", filesize=-1, duration=-1, sourceid=None): #pylint: disable=too-many-arguments
 	visible = query('SELECT courses.autovisible FROM courses JOIN lectures ON lectures.course_id = courses.id WHERE lectures.id = ?', lectureid)[0]['autovisible']
-	video_id = modify('''INSERT INTO videos_data 
+	video_id = modify('''INSERT INTO videos_data
 		(lecture_id, visible, path, video_format, title, comment, internal, file_modified, time_created, time_updated, created_by, hash, file_size, duration, source)
 		VALUES 
 		(?, ?, ?, ?, "", "", "", ?, ?, ?, ?, ?, ?, ?, ?)''',
@@ -73,27 +74,27 @@ def insert_video(lectureid, dbfilepath, fileformatid, hash="", filesize=-1, dura
 
 def split_filename(filename):
 	# '_' and ' ' are handled like '-'
-	return filename.replace('_','-').replace(' ','-').split('-')
+	return filename.replace('_', '-').replace(' ', '-').split('-')
 
-def parse_filename(splitFileName):
-	# filenames: <handle>-<sorter>-<format>.mp4
+def parse_filename(filename):
+	# filenames: <handle>-<sorter>-<format>.mp4, split at '-' into an array
 	data = {'keywords': []}
-	for fileNameChunk in splitFileName:
-		fileNameChunk = fileNameChunk.replace('.mp4','')
+	for chunk in filename:
+		chunk = chunk.replace('.mp4', '')
 		#-<YYMMDD> (date)
 		#-<HHMM> (time)
 		#-<keyword>
 		#	Looking for keywords in: title,speaker,comment, comma seperated list in internal
 		try:
-			if len(fileNameChunk) == 6:
-				data['date'] = datetime.strptime(fileNameChunk,'%y%m%d').date()
-			elif  len(fileNameChunk) == 4:
-				data['time'] = datetime.strptime(fileNameChunk,'%H%M').time()
-			else:	
-				data['keywords'].append(fileNameChunk)
+			if len(chunk) == 6:
+				data['date'] = datetime.strptime(chunk, '%y%m%d').date()
+			elif  len(chunk) == 4:
+				data['time'] = datetime.strptime(chunk, '%H%M').time()
+			else:
+				data['keywords'].append(chunk)
 		except ValueError:
 			# if its not valid date or time, handle it as keyword
-			data['keywords'].append(fileNameChunk)
+			data['keywords'].append(chunk)
 	return data
 
 def filter_lectures_by_datetime(lectures, date, time):
@@ -110,7 +111,7 @@ def filter_lectures_by_datetime(lectures, date, time):
 	return matches
 
 def filter_lectures_by_keywords(lectures, keywords):
-	for field in ['title','speaker','comment','internal']:
+	for field in ['title', 'speaker', 'comment', 'internal']:
 		for lecture in lectures:
 			for keyword in keywords:
 				# first test for exact match, else make it asci and try substring test
@@ -121,25 +122,25 @@ def filter_lectures_by_keywords(lectures, keywords):
 					return [lecture]
 	return []
 
-def extract_format_keyword_from_filename(splitFileName):
-	return splitFileName[-1].split('.',1)[0].lower()
+def extract_format_keyword_from_filename(filename):
+	return filename[-1].split('.', 1)[0].lower()
 
-def filter_formats_by_filename(splitFileName):
-	formatstring = extract_format_keyword_from_filename(splitFileName)
+def filter_formats_by_filename(filename):
+	formatstring = extract_format_keyword_from_filename(filename)
 	formats = query('SELECT * FROM formats ORDER BY prio DESC')
 	for videoformat in formats:
 		# we match the last part of the file name without the extension
-		if formatstring in videoformat['keywords'].replace(',',' ').split(' '):
+		if formatstring in videoformat['keywords'].replace(',', ' ').split(' '):
 			return videoformat['id']
 	# default format is "unknown", with id 0
 	return 0
 
 def sort_file(filename, course=None, lectures=None):
-	splitFileName = split_filename(filename)
+	filename = split_filename(filename)
 	if not course:
-		handle = splitFileName[0]
-		if splitFileName[0].endswith('ws') or splitFileName[0].endswith('ss'):
-			handle = '-'.join(splitFileName[:2])
+		handle = filename[0]
+		if filename[0].endswith('ws') or filename[0].endswith('ss'):
+			handle = '-'.join(filename[:2])
 		courses = query('SELECT * FROM courses WHERE handle = ?', handle)
 		if not courses:
 			return [], 0
@@ -147,19 +148,19 @@ def sort_file(filename, course=None, lectures=None):
 	if not lectures:
 		lectures = query('SELECT * from lectures where course_id = ?', course['id'])
 	# parse all data from the file name
-	data = parse_filename(splitFileName)
+	data = parse_filename(filename)
 	# try to match the file on a single lecture
 	matches = filter_lectures_by_datetime(lectures, data.get('date'), data.get('time'))
 	# if we can't match exactly  based on date and time, we have to match keywords
 	if ((len(matches) != 1) and (len(data['keywords']) > 0)):
-		if not matches:
+		if matches:
 			# only test lectures with the correct date/time, if we have any
 			matches = filter_lectures_by_keywords(matches, data['keywords'])
 		else:
 			# Else test for matches in all lectures of this course
 			matches = filter_lectures_by_keywords(lectures, data['keywords'])
 	# now we should have found exactly one match
-	fmt = filter_formats_by_filename(splitFileName)
+	fmt = filter_formats_by_filename(filename)
 	return matches, fmt
 
 def log_sort_error(course_id, path, matches):
@@ -169,23 +170,8 @@ def log_sort_error(course_id, path, matches):
 	query('INSERT INTO sorterrorlog_data (course_id, path, matches, `when`, time_updated, time_created) VALUES (?, ?, ?, ?, ?, ?)',
 			course_id, path, ','.join(matches_id), datetime.now(), datetime.now(), datetime.now())
 
-def sort_api_token_required(func):
-	@wraps(func)
-	def decorator(*args, **kwargs):
-		if 'apikey' in request.values:
-			token = request.values['apikey']
-		elif request.get_json() and ('apikey' in request.get_json()):
-			token = request.get_json()['apikey']
-		else:
-			token = None
-		if not token == config.get('SORTER_API_KEY', [None]):
-			return 'Permission denied', 403
-		else:
-			return func(*args, **kwargs)
-	return decorator
-
 @app.route('/internal/sort/encoded/<filename>')
-@sort_api_token_required
+@api_token_required('SORTER_API_KEY')
 def sort_encoded(filename):
 	matches, fmt = sort_file(filename)
 	if len(matches) != 1:
@@ -198,11 +184,11 @@ def sort_encoded(filename):
 	return 'OK', 200
 
 @app.route('/internal/sort/autoencode')
-@sort_api_token_required
+@api_token_required('SORTER_API_KEY')
 def sort_autoencode():
 	filename = request.values['path']
 	path = 'autoencode/'+filename
-	matches, fmt = sort_file(filename)
+	matches, fmt = sort_file(filename) #pylint: disable=unused-variable
 	if len(matches) != 1:
 		log_sort_error(-1, 'raw/'+path, matches)
 		return "Could not match filename", 400
@@ -211,7 +197,7 @@ def sort_autoencode():
 	return 'OK', 200
 
 @job_handler('publish_video')
-def handle_published_video(jobid, jobtype, data, state, status):
+def handle_published_video(jobid, jobtype, data, state, status): #pylint: disable=unused-argument
 	if 'lecture_id' not in data or 'format_id' not in data:
 		return
 	insert_video(data['lecture_id'], data['path'], data['format_id'], hash=status['hash'], filesize=status['filesize'], duration=status['duration'])
@@ -224,12 +210,12 @@ def sort_now():
 	for course in courses:
 		modify('BEGIN')
 		for mountpoint in config['VIDEOMOUNT']:
-			existingvideos = query('SELECT videos.path FROM videos JOIN lectures ON (videos.lecture_id = lectures.id) WHERE lectures.course_id = ?',course['id'])
-			knownerrors = query('SELECT sorterrorlog.path FROM sorterrorlog WHERE sorterrorlog.course_id = ?',course['id'])
+			existingvideos = query('SELECT videos.path FROM videos JOIN lectures ON (videos.lecture_id = lectures.id) WHERE lectures.course_id = ?', course['id'])
+			knownerrors = query('SELECT sorterrorlog.path FROM sorterrorlog WHERE sorterrorlog.course_id = ?', course['id'])
 			ignorefiles = []
 			for path in existingvideos + knownerrors:
 				ignorefiles.append(os.path.basename(path['path']))
-			lectures = query('SELECT * from lectures where course_id = ?',course['id'])
+			lectures = query('SELECT * from lectures where course_id = ?', course['id'])
 			coursepath = mountpoint['mountpoint']+course['handle']
 			try:
 				files = os.listdir(coursepath)
@@ -248,11 +234,10 @@ def sort_now():
 						insert_video(matches[0]['id'], dbfilepath, fmt)
 					else:
 						log_sort_error(course['id'], dbfilepath, matches)
-				except Exception:
+				except: #pylint: disable=bare-except
 					traceback.print_exc()
 		modify('COMMIT')
 	if 'ref' in request.values:
 		return redirect(request.values['ref'])
 	else:
 		return 'OK', 200
-
diff --git a/stats.py b/stats.py
index cc19aae3db2d2e5341673ee779d5de1cca960019..7cbab86370d680297876bab63766e9e64aadecc7 100644
--- a/stats.py
+++ b/stats.py
@@ -1,34 +1,43 @@
-from server import *
 import json
-from hashlib import md5
 from datetime import datetime
 
+from server import *
+
 @app.route('/internal/stats')
 @app.route('/internal/stats/<semester>')
 @register_navbar('Statistiken', icon='stats')
 @mod_required
 def stats():
-	semester = query('SELECT DISTINCT semester from courses WHERE semester != ""');
-	for s in semester:
-		year = int(s['semester'][0:4])
-		if s['semester'].endswith('ss'):
-			s['from'] = datetime(year, 4, 1)
-			s['to'] = datetime(year, 10, 1)
-		if s['semester'].endswith('ws'):
-			s['from'] = datetime(year, 10, 1)
-			s['to'] = datetime(year+1, 4, 1)
+	semester = query('SELECT DISTINCT semester from courses WHERE semester != ""')
+	for i in semester:
+		year = int(i['semester'][0:4])
+		if i['semester'].endswith('ss'):
+			i['from'] = datetime(year, 4, 1)
+			i['to'] = datetime(year, 10, 1)
+		if i['semester'].endswith('ws'):
+			i['from'] = datetime(year, 10, 1)
+			i['to'] = datetime(year+1, 4, 1)
 	return render_template('stats.html', semester=semester, filter=request.args.get('filter'))
 
-statsqueries = {}
-statsqueries['formats_views'] = "SELECT formats.description AS labels, count(DISTINCT log.id) AS `values` FROM log JOIN videos ON (videos.id = log.video) JOIN formats ON (formats.id = videos.video_format) GROUP BY formats.id"
-statsqueries['course_count'] = 'SELECT semester AS x, count(id) AS y FROM courses WHERE semester != "" GROUP BY semester'
-statsqueries['lectures_count'] = 'SELECT semester AS x, count(lectures.id) AS y FROM lectures JOIN courses ON (courses.id = lectures.course_id) WHERE semester != "" GROUP BY semester'
-statsqueries['categories_courses'] = "SELECT courses.subject AS labels, count(courses.id) AS `values` FROM courses GROUP BY courses.subject ORDER BY labels DESC LIMIT 100"
-statsqueries['organizer_courses'] = "SELECT courses.organizer AS labels, count(courses.id) AS `values` FROM courses GROUP BY courses.organizer ORDER BY labels DESC LIMIT 100"
-statsqueries['categories_lectures'] = "SELECT courses.subject AS labels, count(lectures.id) AS `values` FROM lectures JOIN courses ON (courses.id = lectures.course_id) WHERE lectures.visible GROUP BY courses.subject ORDER BY `values` DESC LIMIT 100"
-statsqueries['lecture_views'] = "SELECT lectures.time AS x, count(DISTINCT log.id) AS y FROM log JOIN videos ON (videos.id = log.video) JOIN lectures ON (lectures.id = videos.lecture_id) WHERE (lectures.course_id = ?) GROUP BY lectures.id ORDER BY lectures.time"
-statsqueries['live_views'] = "SELECT hlslog.segment AS x, COUNT(DISTINCT hlslog.id) AS y FROM hlslog WHERE hlslog.lecture = ? GROUP BY hlslog.segment ORDER BY hlslog.segment"
-statsqueries['lecture_totalviews'] = "SELECT 42"
+STATS_QUERIES = {}
+STATS_QUERIES['formats_views'] = "SELECT formats.description AS labels, count(DISTINCT log.id) AS `values` FROM log \
+		JOIN videos ON (videos.id = log.video) JOIN formats ON (formats.id = videos.video_format) GROUP BY formats.id"
+STATS_QUERIES['course_count'] = 'SELECT semester AS x, count(id) AS y FROM courses WHERE semester != "" GROUP BY semester'
+STATS_QUERIES['lectures_count'] = 'SELECT semester AS x, count(lectures.id) AS y FROM lectures \
+		JOIN courses ON (courses.id = lectures.course_id) WHERE semester != "" GROUP BY semester'
+STATS_QUERIES['categories_courses'] = "SELECT courses.subject AS labels, count(courses.id) AS `values` FROM courses \
+		GROUP BY courses.subject ORDER BY labels DESC LIMIT 100"
+STATS_QUERIES['organizer_courses'] = "SELECT courses.organizer AS labels, count(courses.id) AS `values` FROM courses \
+		GROUP BY courses.organizer ORDER BY labels DESC LIMIT 100"
+STATS_QUERIES['categories_lectures'] = "SELECT courses.subject AS labels, count(lectures.id) AS `values` FROM lectures \
+		JOIN courses ON (courses.id = lectures.course_id) WHERE lectures.visible GROUP BY courses.subject ORDER BY `values` DESC LIMIT 100"
+STATS_QUERIES['lecture_views'] = "SELECT lectures.time AS x, count(DISTINCT log.id) AS y FROM log \
+		JOIN videos ON (videos.id = log.video) \
+		JOIN lectures ON (lectures.id = videos.lecture_id) \
+		WHERE (lectures.course_id = ?) GROUP BY lectures.id ORDER BY lectures.time"
+STATS_QUERIES['live_views'] = "SELECT hlslog.segment AS x, COUNT(DISTINCT hlslog.id) AS y FROM hlslog WHERE hlslog.lecture = ? \
+		GROUP BY hlslog.segment ORDER BY hlslog.segment"
+STATS_QUERIES['lecture_totalviews'] = "SELECT 42"
 
 def plotly_date_handler(obj):
 	return obj.strftime("%Y-%m-%d %H:%M:%S")
@@ -37,9 +46,9 @@ def plotly_date_handler(obj):
 @app.route('/internal/stats/generic/<req>/<param>')
 @mod_required
 def stats_generic(req, param=None):
-	if req not in statsqueries:
+	if req not in STATS_QUERIES:
 		return 404, 'Not found'
-	rows = query(statsqueries[req], *(statsqueries[req].count('?')*[param]))
+	rows = query(STATS_QUERIES[req], *(STATS_QUERIES[req].count('?')*[param]))
 	if req == 'live_views':
 		res = {'x': [], 'y': []}
 	else:
@@ -61,7 +70,7 @@ def stats_generic(req, param=None):
 @app.route('/internal/stats/viewsperday/<req>')
 @app.route('/internal/stats/viewsperday/<req>/<param>')
 @mod_required
-def stats_viewsperday(req, param=""):
+def stats_viewsperday(req, param=""): #pylint: disable=too-many-locals
 	update_expr = 'INSERT INTO logcache (req, param, trace, date, value) SELECT "%s", ?, trace, date, y FROM (%s) AS cachetmp WHERE date < ?'
 	query_expr = 'SELECT date, trace, value AS y FROM logcache WHERE req = "%s" AND param = ? UNION SELECT * FROM (%s) AS cachetmp'
 	date_subexpr = 'SELECT CASE WHEN MAX(date) IS NULL THEN "2000-00-00" ELSE MAX(date) END AS t FROM `logcache` WHERE req = "%s" AND param = ?'
@@ -116,9 +125,10 @@ def stats_viewsperday(req, param=""):
 	expr = queries[req].replace('%T', '"'+query(date_subexpr%('viewsperday.'+req), param)[0]['t']+'"')
 	params = [param]*expr.count('?')
 	try:
+		modify("BEGIN")
 		modify(update_expr%('viewsperday.'+req, expr), param, *(params+[datetime.combine(date.today(), time())]))
 		modify('COMMIT')
-	except Exception:
+	except Exception: #pylint: disable=broad-except
 		traceback.print_exc()
 	expr = queries[req].replace('%T', '"'+str(date.today())+'"')
 	rows = query(query_expr%('viewsperday.'+req, expr), param, *params)
@@ -146,4 +156,3 @@ def stats_viewsperday(req, param=""):
 			trace['y'].append(data.get(start, {}).get(trace['name'], 0))
 		start += timedelta(days=1)
 	return Response(json.dumps(res, default=plotly_date_handler), mimetype='application/json')
-
diff --git a/template_helper.py b/template_helper.py
index cece6949d9d5d0877e9d2bc088c7fcc2963ff7bd..281352d2aff01f17321109aa3ea9851bc168d748 100644
--- a/template_helper.py
+++ b/template_helper.py
@@ -1,10 +1,14 @@
-from server import *
 import subprocess
 from time import mktime
 from email.utils import formatdate
+from socket import gethostname
+from ipaddress import ip_address, ip_network
+import base64
+
+from server import *
 
-app.jinja_env.trim_blocks = True
-app.jinja_env.lstrip_blocks = True
+app.jinja_env.trim_blocks = True #pylint: disable=no-member
+app.jinja_env.lstrip_blocks = True #pylint: disable=no-member
 app.add_template_global(random.randint, name='randint')
 app.add_template_global(datetime, name='datetime')
 app.add_template_global(timedelta, name='timedelta')
@@ -13,24 +17,24 @@ app.add_template_global(min, name='min')
 app.add_template_global(max, name='max')
 
 # get git commit
-output = subprocess.check_output(['git', "log", "-g", "-1", "--pretty=%H#%h#%d#%s"]).decode('UTF-8').split('#', 3)
-app.jinja_env.globals['gitversion'] = {'hash': output[1], 'longhash': output[0], 'branch': output[2], 'msg': output[3]}
+GITOUTPUT = subprocess.check_output(['git', "log", "-g", "-1", "--pretty=%H#%h#%d#%s"]).decode('UTF-8').split('#', 3)
+app.jinja_env.globals['gitversion'] = {'hash': GITOUTPUT[1], 'longhash': GITOUTPUT[0], 'branch': GITOUTPUT[2], 'msg': GITOUTPUT[3]} #pylint: disable=no-member
 
 @app.url_defaults
 def static_version_inject(endpoint, values):
 	if endpoint == 'static':
-		values['v'] = app.jinja_env.globals['gitversion']['longhash']
+		values['v'] = app.jinja_env.globals['gitversion']['longhash'] #pylint: disable=no-member
 
 @app.template_global()
-def ismod(*args):
-	return ('user' in session)
+def ismod(*args): #pylint: disable=unused-argument
+	return 'user' in session
 
-app.jinja_env.globals['navbar'] = []
+app.jinja_env.globals['navbar'] = [] #pylint: disable=no-member
 # iconlib can be 'bootstrap'
 # ( see: http://getbootstrap.com/components/#glyphicons )
 # or 'fa'
 # ( see: http://fontawesome.io/icons/ )
-def register_navbar(name, iconlib='bootstrap', icon=None, userendpoint=False, group=None, endpoint=None):
+def register_navbar(name, iconlib='bootstrap', icon=None, userendpoint=False, group=None, endpoint=None): #pylint: disable=too-many-arguments
 	def wrapper(func):
 		urlendpoint = endpoint
 		if not endpoint:
@@ -43,11 +47,11 @@ def register_navbar(name, iconlib='bootstrap', icon=None, userendpoint=False, gr
 		item['endpoint'] = urlendpoint
 		item['visible'] = not urlendpoint in mod_endpoints
 		item['name'] = name
-		app.jinja_env.globals['navbar'].append(item)
+		app.jinja_env.globals['navbar'].append(item) #pylint: disable=no-member
 		return func
 	return wrapper
 
-csrf_endpoints = []
+csrf_endpoints = [] #pylint: disable=invalid-name
 def csrf_protect(func):
 	csrf_endpoints.append(func.__name__)
 	@wraps(func)
@@ -58,7 +62,7 @@ def csrf_protect(func):
 			token = request.get_json()['_csrf_token']
 		else:
 			token = None
-		if not ('_csrf_token' in session) or (session['_csrf_token'] != token ) or not token: 
+		if ('_csrf_token' not in session) or (session['_csrf_token'] != token) or not token:
 			return 'csrf test failed', 403
 		else:
 			return func(*args, **kwargs)
@@ -74,11 +78,11 @@ def csrf_inject(endpoint, values):
 def base64encode(str):
 	try:
 		return base64.b64encode(str.encode('UTF-8')).decode('UTF-8')
-	except:
+	except: #pylint: disable=bare-except
 		return ''
 
 @app.template_filter()
-def checkperm(perms, username=None, password=None):
+def checkperm(perms, username=None, password=None): #pylint: disable=too-many-branches,too-many-return-statements
 	if ismod():
 		return True
 	perms = evalperm(perms)
@@ -106,7 +110,7 @@ def checkperm(perms, username=None, password=None):
 	return False
 
 @app.template_filter()
-def permdescr(perms):
+def permdescr(perms): #pylint: disable=too-many-branches,too-many-return-statements
 	perms = evalperm(perms)
 	public = False
 	password = False
@@ -149,25 +153,25 @@ def permdescr(perms):
 
 # debian ships jinja2 without this test...
 @app.template_test(name='equalto')
-def equalto(a,b):
-	return a == b
+def equalto(value_a, value_b):
+	return value_a == value_b
 
 @app.template_filter(name='filterdict')
 def jinja2_filterdict(value, attrdel):
-	v = dict(value)
-	for a in attrdel:
-		if a in v:
-			del v[a]
-	return dict(v)
+	value = dict(value)
+	for attr in attrdel:
+		if attr in value:
+			del value[attr]
+	return dict(value)
 
 @app.template_filter(name='semester')
-def human_semester(s, long=False):
-	if not s or s == 'zeitlos' or len(s) != 6:
+def human_semester(value, long=False):
+	if not value or value == 'zeitlos' or len(value) != 6:
 		return 'Zeitlos'
-	year = s[0:4]
-	semester = s[4:6].upper()
+	year = value[0:4]
+	semester = value[4:6].upper()
 	if not year.isdigit() or semester not in ['SS', 'WS']:
-		print('Invalid semester string "%s"'%s)
+		print('Invalid semester string "%s"'%value)
 		return '??'
 	if not long:
 		return semester+year[2:]
@@ -177,28 +181,28 @@ def human_semester(s, long=False):
 		return 'Wintersemester %s/%s'%(year, str(int(year)+1)[2:])
 
 @app.template_filter(name='date')
-def human_date(d):
-	return d.strftime('%d.%m.%Y')
+def human_date(value):
+	return value.strftime('%d.%m.%Y')
 
 @app.template_filter(name='fulldate')
-def human_fulldate(d):
-	return d.strftime('%a, %d.%m.%Y, %H:%M Uhr')
+def human_fulldate(value):
+	return value.strftime('%a, %d.%m.%Y, %H:%M Uhr')
 
 @app.template_filter(name='time')
-def human_time(d):
-	return d.strftime('%H:%M')
+def human_time(value):
+	return value.strftime('%H:%M')
 
 @app.template_filter()
-def rfc3339(d):
-	return d.strftime('%Y-%m-%dT%H:%M:%S+02:00')
+def rfc3339(value):
+	return value.strftime('%Y-%m-%dT%H:%M:%S+02:00')
 
 @app.template_filter()
-def time_offset(s):
-	return '%02d:%02d:%02d'%(s//3600, (s//60)%60, s%60)
+def time_offset(value):
+	return '%02d:%02d:%02d'%(value//3600, (value//60)%60, value%60)
 
 @app.template_filter()
-def rfc822(d):
-	return formatdate(mktime(d.timetuple()))
+def rfc822(value):
+	return formatdate(mktime(value.timetuple()))
 
 @app.template_global()
 def get_announcements(minlevel=0):
@@ -206,29 +210,35 @@ def get_announcements(minlevel=0):
 	if ismod():
 		offset = timedelta(hours=24)
 	try:
-		return query('SELECT * FROM announcements WHERE NOT deleted AND ((time_expire = NULL) OR time_expire > ?) AND (? OR (visible AND time_publish < ?)) AND level >= ? ORDER BY level DESC', datetime.now()-offset, ismod(), datetime.now(), minlevel)
-	except:
+		return query('SELECT * FROM announcements WHERE \
+				NOT deleted AND \
+				((time_expire IS NULL) OR time_expire > ?) AND \
+				(? OR (visible AND time_publish < ?)) AND \
+				level >= ? \
+				ORDER BY level DESC',
+				datetime.now()-offset, ismod(), datetime.now(), minlevel)
+	except: #pylint: disable=bare-except
 		return []
 
 @app.template_filter()
-def fixnl(s):
+def fixnl(value):
 	# To be remove, as soon as db schema is cleaned-up
-	return str(s).replace('\n', '<br>')
+	return str(value).replace('\n', '<br>')
 
 @app.template_filter()
-def tagid(s):
-	if not s:
+def tagid(value):
+	if not value:
 		return 'EMPTY'
-	s = s.replace(' ', '_').lower()
-	r = ''
-	for c in s:
-		if c in string.ascii_lowercase+string.digits+'_':
-			r = r + c
-	return r
+	value = value.replace(' ', '_').lower()
+	result = ''
+	for char in value:
+		if char in string.ascii_lowercase+string.digits+'_':
+			result = result + char
+	return result
 
 @app.template_global()
 def is_readonly():
 	try:
 		return show('SHOW GLOBAL STATUS LIKE "wsrep_ready"')['wsrep_ready'] != 'ON'
-	except:
+	except: #pylint: disable=bare-except
 		return True
diff --git a/templates/course.html b/templates/course.html
index a43010e919e27ca53be5c266076f7578890bc45d..2c1747418573e37f27e1e309da0cf14b73e9eb30 100644
--- a/templates/course.html
+++ b/templates/course.html
@@ -72,11 +72,11 @@
 	<div class="row panel-body collapse out panel-collapse" id="statspanel">
 		<div class="col-md-6 col-xs-12">
 			<p class="text-center">Zuschauer pro Tag</p>
-			<div class="plot-view" data-url="{{url_for('stats_viewsperday', req="course", param=course.id)}}"></div>
+			<div id="plot_stats_viewsperday" class="plot-view" data-url="{{url_for('stats_viewsperday', req="course", param=course.id)}}"></div>
 		</div>
 		<div class="col-md-6 col-xs-12">
 			<p class="text-center">Zuschauer pro Termin</p>
-			<div class="plot-view" data-type="bar" data-url="{{url_for('stats_generic', req="lecture_views", param=course.id)}}"></div>
+			<div id="plot_stats_generic" class="plot-view" data-type="bar" data-url="{{url_for('stats_generic', req="lecture_views", param=course.id)}}"></div>
 		</div>
 	</div>
 </div>
diff --git a/templates/lecture.html b/templates/lecture.html
index df6e477ff643a4023147bd8b0552183ad83c242d..cf38a3cface94f5f36884febcd7dbe8e92d757fb 100644
--- a/templates/lecture.html
+++ b/templates/lecture.html
@@ -80,11 +80,11 @@
 	<div class="row panel-body collapse out panel-collapse" id="statspanel">
 		<div class="col-md-6 col-xs-12">
 			<p class="text-center">Zuschauer pro Tag</p>
-			<div class="plot-view" data-url="{{url_for('stats_viewsperday', req="lecture", param=lecture.id)}}"></div>
+			<div id="plot_stats_viewsperday" class="plot-view" data-url="{{url_for('stats_viewsperday', req="lecture", param=lecture.id)}}"></div>
 		</div>
 		<div class="col-md-6 col-xs-12">
 			<p class="text-center">Zuschauer im Livestream</p>
-			<div class="plot-view" data-url="{{url_for('stats_generic', req="live_views", param=lecture.id)}}" data-reload="60000"></div>
+			<div id="plot_stats_generic" class="plot-view" data-url="{{url_for('stats_generic', req="live_views", param=lecture.id)}}" data-reload="60000"></div>
 		</div>
 	</div>
 </div>
diff --git a/templates/stats.html b/templates/stats.html
index effded1eb65fd06d9f89c11ccde052558122c38d..c24fddc8385a926ea3572071fdd2dc3dd9d6aa44 100644
--- a/templates/stats.html
+++ b/templates/stats.html
@@ -9,19 +9,19 @@
 			<div class="row col-xs-12">
 				<div class="col-xs-12 col-md-6">
 					<p class="text-center">Veranstaltungen pro Semester</p>
-					<div class="plot-view" data-url="{{url_for('stats_generic', req="course_count")}}"></div>
+					<div id="plot_course_count" class="plot-view" data-url="{{url_for('stats_generic', req="course_count")}}"></div>
 				</div>
 				<div class="col-xs-12 col-md-6">
 					<p class="text-center">Vorlesungen pro Semester</p>
-					<div class="plot-view" data-url="{{url_for('stats_generic', req="lectures_count")}}"></div>
+					<div id="plot_lectures_count" class="plot-view" data-url="{{url_for('stats_generic', req="lectures_count")}}"></div>
 				</div>
 				<div class="col-xs-12 col-md-6">
 					<p class="text-center">Veranstaltungen pro Kategorie</p>
-					<div class="plot-view" data-type="pie" data-url="{{url_for('stats_generic', req="categories_courses")}}"></div>
+					<div id="plot_categories_courses" class="plot-view" data-type="pie" data-url="{{url_for('stats_generic', req="categories_courses")}}"></div>
 				</div>
 				<div class="col-xs-12 col-md-6">
 					<p class="text-center">Vorlesungen pro Kategorie</p>
-					<div class="plot-view" data-type="pie" data-url="{{url_for('stats_generic', req="categories_lectures")}}"></div>
+					<div id="plot_categories_lectures" class="plot-view" data-type="pie" data-url="{{url_for('stats_generic', req="categories_lectures")}}"></div>
 				</div>
 				<!--<div class="col-xs-12 col-md-12 plot-view" style="height: 1200px;" data-type="pie" data-url="{{url_for('stats_generic', req="organizer_courses")}}"></div>!-->
 			</div>
@@ -34,11 +34,11 @@
 		<div class="panel-body" >
 			<div class=col-xs-12">
 				<p class="text-center">Zuschauer pro Veranstaltung</p>
-				<div class="plot-view" data-url="{{url_for('stats_viewsperday', req="courses", filter=filter)}}"></div>
+				<div id="plot_courses" class="plot-view" data-url="{{url_for('stats_viewsperday', req="courses", filter=filter)}}"></div>
 			</div>
 			<div class=col-xs-12">
 				<p class="text-center">Zuschauer pro Format</p>
-				<div class="plot-view" data-url="{{url_for('stats_viewsperday', req="global", filter=filter)}}"></div>
+				<div id="plot_global" class="plot-view" data-url="{{url_for('stats_viewsperday', req="global", filter=filter)}}"></div>
 			</div>
 		</div>
 	</div>
diff --git a/templates/timetable.html b/templates/timetable.html
index fe6ab377147a42aaabc25de442e526f5b17715c0..5a7e1ec40f9a874239af0514247ef77416de26ed 100644
--- a/templates/timetable.html
+++ b/templates/timetable.html
@@ -42,7 +42,7 @@
 			<table id="timetable" class="table table-bordered col-xs-12" style="width: auto; min-width: 100%;">
 				<tr><th style="width: 30px;"></th>{% for d in days if (d.index < 5) or (d.lectures|length) > 0 %}<th style="min-width: 10em;" colspan="{{d.maxcol}}">{{ d.date.strftime("%A (%d.%m.%Y)") }}</th>{% endfor %}</tr>
 				{# iterating over each 15 min block #}
-				{% for t in times %}
+				{% for t in blocks %}
 					{% set time_index = loop.index %}
 					<tr{% if t.strftime("%M") == "00" %} class="hourlytime"{% endif %}>
 						{# display time in first row if its a full hour #}
@@ -51,9 +51,9 @@
 						{% for d in days if (d.index < 5) or (d.lectures|length) > 0 %}
 							{% for col in range(1,d.maxcol+1) %}
 
-								{# iterate over all lextures but only consider those that are in the current column and happen in the 15 min block #}
+								{# iterate over all lectures but only consider those that are in the current column and happen in the 15 min block #}
 								{# time_index starts at 0 so we use it directly and do not do +1 #}
-								{% for l in d.lectures|selectattr('timetable_col','equalto',col) if ((l.time.time() >= t) and (l.time.time() < times[time_index])) %}
+								{% for l in d.lectures|selectattr('timetable_col','equalto',col) if ((l.time.time() >= t) and (l.time.time() < blocks[time_index])) %}
 									{# handle the first column of a day specialy, set red background if hidden #}
 									<td
 										{% if col == 1 %} class="newday"{% endif %}
diff --git a/tests/test_misc.py b/tests/test_misc.py
index a32c0d2e274b73245c0d1dab871db4af965d6de6..088c67759dec0783b75ce8486f88aaa788b41bd3 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -233,8 +233,8 @@ class VideoTestCase(unittest.TestCase):
 			r = self.app.post('/internal/jobs/api/worker/test/schedule', data=json.dumps({'jobtypes': ['thumbnail'], 'queues': ['default'], 'apikey': '1'}), content_type='application/json')
 			assert r.status_code == 200
 			jobdata = json.loads(json.loads(r.data.decode())['data'])
-			assert jobdata.get('lectureid') == '6981'
-			assert jobdata.get('path') == 'pub/hls/15ws-afi.m3u8'
+			assert jobdata.get('filename') == 'l_6981.jpg'
+			assert jobdata.get('src') == 'pub/hls/15ws-afi.m3u8'
 
 			r = self.app.get('/internal/streaming/legacy_auth/testserver', data={'app': 'live', 'call': 'publish_done', 'pass': 'caisoh8aht0wuSu', 'lecture': 6981, 'name': '15ws-afi'}, headers={'X-Real-IP': '137.226.35.193'})
 			assert r.status_code == 200
diff --git a/timetable.py b/timetable.py
index 00f1a89dbacf86c73f938491a069e9070d34ac90..6efbbb3a9d85b2340e7d9dfa81d77eae82b7a803 100644
--- a/timetable.py
+++ b/timetable.py
@@ -1,5 +1,69 @@
 from server import *
-from datetime import time
+
+def get_monday(day):
+	return day-timedelta(days=day.weekday())
+
+def get_week_offset(value):
+	if value is None:
+		return 0
+	day = None
+	for pattern in ['%d-%m-%Y-1', '%Y-W%W-%w']:
+		try:
+			day = datetime.strptime(value+'-1', pattern)
+		except ValueError:
+			pass
+		if day is not None:
+			break
+	if day is None:
+		return 0
+	return int((get_monday(day) - get_monday(datetime.now())).days/7)
+
+def query_lectures_on_day(start, end):
+	# What we want to match:
+	#  lecture.time <= end AND lecture.time+lecture.duration >= start
+	# But there is no SQL statement that does this and is compatible with both sqlite
+	# and mysql, so we approximate the "lecture.time+lecture.duration" part
+	rows = query('''SELECT lectures.*, courses.short, "course" AS sep, courses.*
+		FROM lectures
+		JOIN courses ON (lectures.course_id = courses.id)
+		WHERE time <= ? AND time > ?
+		ORDER BY time ASC''', end, start-timedelta(weeks=2))
+	lectures = []
+	for lecture in rows:
+		if lecture['time']+timedelta(minutes=lecture['duration']) >= start:
+			lectures.append(lecture)
+	return lectures
+
+def timetable_sweepline(days):
+	'''Use a sweepline algorithm to find overlapping lectures
+
+	For each day the item 'maxcol' will be the number of columns required to display
+	the overlapping lectures. For each lecture the item 'timetable_col' will be the
+	index of the column the lecture is going to be rendered in.'''
+	earliest_start = time(23, 59)
+	latest_end = time(0, 0)
+	for day in days:
+		sweeplinetupels = [(lecture['time'].time(), True, lecture) for lecture in day['lectures']]
+		sweeplinetupels += [(lecture['time_end'].time(), False, lecture) for lecture in day['lectures']]
+		maxcol = 0
+		curcol = 0
+		freecol = []
+		sweeplinetupels.sort(key=lambda row: row[:2])
+		for timestamp, is_start, lecture in sweeplinetupels:
+			if is_start:
+				curcol += 1
+				maxcol = max(maxcol, curcol)
+				if freecol:
+					lecture['timetable_col'] = freecol.pop()
+				else:
+					lecture['timetable_col'] = maxcol
+				earliest_start = min(earliest_start, timestamp)
+			else:
+				curcol -= 1
+				freecol.append(lecture['timetable_col'])
+				latest_end = max(latest_end, timestamp)
+		day['maxcol'] = max(maxcol, 1)
+	return earliest_start, latest_end
 
 @register_navbar('personalisierter Drehplan', icon='calendar', userendpoint=True, endpoint='timetable_user')
 @register_navbar('Drehplan', icon='calendar')
@@ -7,109 +71,46 @@ from datetime import time
 @app.route('/internal/user/<int:user>/timetable', endpoint='timetable_user')
 @mod_required
 def timetable(user=None):
-	if 'kw' not in request.args:
-		if 'date' in request.args:
-			thisweekmonday = datetime.now()
-			thisweekmonday -= timedelta(days=thisweekmonday.weekday())
-
-			try:
-				datesweekmonday = datetime.strptime(request.args['date'], '%d-%m-%Y')
-			except ValueError:
-				datesweekmonday = None
-			if not datesweekmonday:
-				try:
-					datesweekmonday = datetime.strptime(request.args['date'] + '-1', "%Y-W%W-%w")
-				except ValueError:
-					datesweekmonday = None
-
-			if not datesweekmonday:
-				kw = 0
-			else:
-				datesweekmonday -= timedelta(days=datesweekmonday.weekday())
-				kw = int((datesweekmonday.date() - thisweekmonday.date()).days/7)
-		else:
-			kw=0
+	if 'kw' in request.args:
+		week_offset = int(request.args['kw'])
 	else:
-		kw=int(request.args['kw'])
-	try:
-		start = date.today() - timedelta(days=date.today().weekday() -7*kw)
-	except:
-		start = date.today() - timedelta(days=date.today().weekday())
+		week_offset = get_week_offset(request.args.get('date', None))
+	start_day = date.today() - timedelta(days=date.today().weekday() - 7*week_offset)
+	days = [{'date': start_day, 'lectures': [], 'atonce': 0, 'index': 0}]
+	for i in range(1, 7):
+		days.append({'date': days[i-1]['date'] + timedelta(days=1), 'atonce': 0, 'index': i, 'lectures': []})
+	for day in days:
+		start = datetime.combine(day['date'], time(0, 0))
+		end = datetime.combine(day['date'], time(23, 59))
+		day['lectures'] = []
+		for lecture in query_lectures_on_day(start, end):
+			lecture['time_end'] = lecture['time']+timedelta(minutes=lecture['duration'])
+			# "Crop" lecture's timespan to start/end of day
+			lecture['time'] = max(start, lecture['time'])
+			lecture['time_end'] = min(end, lecture['time_end'])
+			# Ensure length > 0
+			lecture['time_end'] = max(lecture['time_end'], lecture['time']+timedelta(minutes=1))
+			lecture['duration'] = int((lecture['time_end'] - lecture['time']).total_seconds()/60)
+			# Filter on responsible user if a user parameter was given
+			lecture['responsible'] = query('''SELECT users.*
+					FROM responsible
+					JOIN users ON (responsible.user_id = users.id AND responsible.course_id = ?)
+					ORDER BY users.realname ASC''', lecture['course_id'])
+			if len(lecture['responsible']) == 0:
+				lecture['responsible'] = [{"realname": "Niemand", "id": -1}]
+			if not user or user in [r['id'] for r in lecture['responsible']]:
+				day['lectures'].append(lecture)
+	earliest_start, latest_end = timetable_sweepline(days)
+	start = min(earliest_start, time(8, 0))
+	end = max(latest_end, time(19, 0))
+	blocks = []
+	for i in range(start.hour*4, min(int((60*end.hour/15)/4)*4+5, 24*4)):
+		timestamp = i*15
+		blocks.append(time(int(timestamp/60), timestamp%60))
 	weekofyear = '{}-W{:02d}'.format(datetime.today().year, datetime.today().isocalendar()[1])
-	days = [{'date': start, 'lectures': [], 'atonce':0, 'index': 0 }]
-	earlieststart=time(23,59)
-	latestend=time(0,0)
-	for i in range(1,7):
-		days.append({'date': days[i-1]['date'] + timedelta(days=1), 'atonce':0, 'index': i, 'lectures':[] })
-	for i in days:
-		# date and times are burning in sqlite
-		s = datetime.combine(i['date'],time(0,0))
-		e = datetime.combine(i['date'],time(23,59))
-		i['lectures'] = []
-		for l in query ('''
-					SELECT lectures.*, courses.short, "course" AS sep, courses.*
-					FROM lectures 
-					JOIN courses ON (lectures.course_id = courses.id) 
-					WHERE time < ? AND time > ? AND NOT norecording AND NOT external
-					ORDER BY time ASC''', i['date']+timedelta(weeks=2), i['date']-timedelta(weeks=2)):
-			# we can not use the where clause of sql to match against the time, because sqlite and mysql use a different syntax -.-
-			# we still use it to only get the lectures for a 3 week periode
-			if not l['time']:
-				l['time'] = datetime.fromtimestamp(0)
-			if ((l['time'] < e) and (l['time'] > s)) or ((l['time'] + timedelta(minutes=l['duration']) < e) and (l['time'] + timedelta(minutes=l['duration'])> s)):
-				# filter on responsible user if a user parameter was given
-				l['responsible'] = query('''SELECT users.*
-						FROM responsible
-						JOIN users ON (responsible.user_id = users.id AND responsible.course_id = ?)
-						ORDER BY users.realname ASC''', l['course_id'])
-				if len(l['responsible']) == 0:
-					l['responsible'] = [{"realname": "Niemand", "id": -1}]
-				if not user or user in [ r['id'] for r in l['responsible'] ]:
-					i['lectures'].append(l)
-
-				oldtime = l['time']
-				l['time'] = max(s,l['time'])
-				l['duration'] = ( min(e,oldtime + timedelta(minutes=l['duration'])) - l['time'] ).total_seconds()/60
-		# sweepline to find out how many lectures overlap
-		maxcol=0;
-		curcol=0;
-		freecol=[];
-		for l in i['lectures']:
-			# who the hell inserts lectures with zero length?!?!?
-			l['time_end'] = l['time']+timedelta(minutes=max(l['duration'],1))
-		# create sweepline input array
-		sweeplinetupels = [(l['time'],True,l) for l in i['lectures']]
-		sweeplinetupels += [(l['time_end'],False,l) for l in i['lectures']]
-		tmp = []
-		for x in sweeplinetupels:
-			unique = True
-			for y in tmp:
-				if x[0] == y[0] and x[1] == y[1] and x[2]['short'] == y[2]['short']:
-					unique = False
-			if unique:
-				tmp.append(x)
-
-		sweeplinetupels = sorted(tmp, key=lambda t:(t[0],t[1]))
-		for l in sweeplinetupels:
-			if l[1]:
-				curcol += 1
-				if curcol > maxcol:
-					maxcol = curcol
-				if len(freecol) == 0:
-					freecol.append(maxcol)
-				l[2]['timetable_col'] = freecol.pop()
-				if earlieststart > l[0].time():
-					earlieststart = l[0].time()
-			else:
-				curcol -= 1
-				freecol.append(l[2]['timetable_col'])
-				if latestend < l[0].time():
-					latestend = l[0].time()
-		i['maxcol'] = max(maxcol,1)
-	times=[]
-	s = min(earlieststart,time(8,0))
-	e = max(latestend,time(19,0))
-	for i in range(s.hour*4,min(int((60*e.hour/15)/4)*4+5,24*4)):
-		t = i*15
-		times.append(time(int(t/60),t%60))
-	return render_template('timetable.html',days=days,times=times,kw=kw, weekofyear=weekofyear, user=query('SELECT * FROM users WHERE id = ?', user)[0] if user else None)
+	return render_template('timetable.html',
+			days=days,
+			blocks=blocks,
+			kw=week_offset,
+			weekofyear=weekofyear,
+			user=query('SELECT * FROM users WHERE id = ?', user)[0] if user else None)