diff --git a/230101/project.toml b/230101/project.toml
index d90df0cfb9ddbc32591bf1ca7b009d97d07e7dda..814e1c24e3d8d4d6251feedd5001b213c440b8d7 100644
--- a/230101/project.toml
+++ b/230101/project.toml
@@ -7,9 +7,13 @@ date = "230101"
 [source]
 files = ["C01.mp4", "C02.mp4", "C03.mp4"]
 stereo = false
-start = "2"
+start = "1"
 end = "12"
-fast = [["5", "7"], ["9", "11"]]
+fast = [["6", "8"], ["10", "11"]]
+questions = [
+	["1.5", "3", "Hallo liebes Publikum. Ich habe leider meine Frage vergessen. Bitte entschuldigt die Störung."],
+	["3.5", "5", "Ah jetzt weiß ich es wieder. Meine Frage war: Was war meine Frage?"]
+]
 
 [source.metadata]
 source_duration = "12.53000"
@@ -22,5 +26,6 @@ source_sample_rate = 48000
 preprocessed = false
 asked_start_end = true
 asked_fast = true
+asked_questions = true
 rendered = false
 transcoded = []
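For context, the new `questions` key holds `(start, end, text)` triples, with timestamps in source-time seconds just like `start`, `end` and `fast`. A minimal stand-alone sketch of that shape (plain strings stand in for the project's `Time` type; struct and field names other than `questions` are illustrative):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Source {
	/// (start, end, text) triples; sorted and overlaid during rendering.
	#[serde(default)]
	questions: Vec<(String, String, String)>
}

fn main() {
	let toml = r#"questions = [["1.5", "3", "What was my question?"]]"#;
	let source: Source = toml::from_str(toml).unwrap();
	assert_eq!(source.questions[0].1, "3");
}
```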
diff --git a/src/main.rs b/src/main.rs
index 5b163f27e313f6f2a75b4f06200d4e4a5904a4ef..742ecd894945b4af74b755a1f529773e3c404a34 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -216,6 +216,9 @@ struct ProjectProgress {
 	#[serde(default)]
 	asked_questions: bool,
 
+	#[serde(default)]
+	rendered_assets: bool,
+
 	#[serde(default)]
 	rendered: bool,
 
@@ -414,6 +417,14 @@ fn main() {
 		fs::write(&project_path, toml::to_string(&project).unwrap().as_bytes()).unwrap();
 	}
 
+	// render the assets
+	if !project.progress.rendered_assets {
+		renderer.render_assets(&project).unwrap();
+		project.progress.rendered_assets = true;
+
+		fs::write(&project_path, toml::to_string(&project).unwrap().as_bytes()).unwrap();
+	}
+
 	// render the video
 	let mut videos = Vec::new();
 	videos.push(if project.progress.rendered {
diff --git a/src/question.rs b/src/question.rs
index daf25bbddca95579741f275a0eec858fee0c0353..0d7e3fc5f72cd0203a581b83d198f47b126013d7 100644
--- a/src/question.rs
+++ b/src/question.rs
@@ -13,7 +13,7 @@ pub(crate) struct Question {
 }
 
 impl Question {
-	pub(crate) fn new(res: Resolution, lang: &Language<'_>, str: String) -> Self {
+	pub(crate) fn new(res: Resolution, lang: &Language<'_>, str: &str) -> Self {
 		static FONT: OnceLock<Owned<Font<'static>>> = OnceLock::new();
 		let font = FONT.get_or_init(|| {
 			let fc = Fontconfig::new().unwrap();
@@ -134,14 +134,6 @@ impl Question {
 		svg.set_width(self.res.width());
 		svg.set_height(self.res.height());
 		svg.set_view_box("0 0 1920 1080");
-		svg.push(
-			Rect::new()
-				.with_fill("#040")
-				.with_x(0)
-				.with_y(0)
-				.with_width(1920)
-				.with_height(1080)
-		);
 		svg.push(self.g);
 		svg
 	}
diff --git a/src/render/filter.rs b/src/render/filter.rs
index dd82ecbaace735594b19614e6b01e9c0bb0a6ab2..70832f4a3841cd9f9eb51699337236f7fe9bf8a1 100644
--- a/src/render/filter.rs
+++ b/src/render/filter.rs
@@ -19,6 +19,7 @@ pub(crate) enum Filter {
 		overlay_input: Cow<'static, str>,
 		x: Cow<'static, str>,
 		y: Cow<'static, str>,
+		repeatlast: bool,
 		output: Cow<'static, str>
 	},
 
@@ -37,6 +38,22 @@ pub(crate) enum Filter {
 		output: Cow<'static, str>
 	},
 
+	/// Fade only the video, using the alpha channel.
+	FadeAlpha {
+		input: Cow<'static, str>,
+		direction: &'static str,
+		start: Time,
+		duration: Time,
+		output: Cow<'static, str>
+	},
+
+	/// Offset the video's PTS by the given number of seconds.
+	VideoOffset {
+		input: Cow<'static, str>,
+		seconds: Time,
+		output: Cow<'static, str>
+	},
+
 	/// Generate silence. The video is copied.
 	GenerateSilence {
 		video: Cow<'static, str>,
@@ -77,11 +94,13 @@ impl Filter {
 				overlay_input,
 				x,
 				y,
+				repeatlast,
 				output
 			} => {
+				let repeatlast: u8 = (*repeatlast).into();
 				writeln!(
 					complex,
-					"{}{}overlay=x={x}:y={y}{};",
+					"{}{}overlay=x={x}:y={y}:repeatlast={repeatlast}:eval=init{};",
 					channel('v', video_input),
 					channel('v', overlay_input),
 					channel('v', output)
@@ -129,6 +148,34 @@ impl Filter {
 				)?;
 			},
 
+			Self::FadeAlpha {
+				input,
+				direction,
+				start,
+				duration,
+				output
+			} => {
+				writeln!(
+					complex,
+					"{}fade={direction}:st={start}:d={duration}:alpha=1{};",
+					channel('v', input),
+					channel('v', output)
+				)?;
+			},
+
+			Self::VideoOffset {
+				input,
+				seconds,
+				output
+			} => {
+				writeln!(
+					complex,
+					"{}setpts=PTS+{seconds}/TB{};",
+					channel('v', input),
+					channel('v', output)
+				)?;
+			},
+
 			Self::GenerateSilence { video, output } => {
 				writeln!(
 					complex,
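For reference, a stand-alone sketch of the lines the two new variants are meant to append to the filter script (the `[label]` bracket syntax assumed for `channel`, and all label names and times, are illustrative):

```rust
fn main() {
	// FadeAlpha fades only the overlay's alpha channel; audio is untouched.
	let fade = format!("[{}]fade={}:st={}:d={}:alpha=1[{}];", "q0", "in", 0.0, 0.4, "q0fin");
	assert_eq!(fade, "[q0]fade=in:st=0:d=0.4:alpha=1[q0fin];");

	// VideoOffset delays the overlay by shifting its presentation timestamps.
	let offset = format!("[{}]setpts=PTS+{}/TB[{}];", "q0fout", 12.5, "q0pts");
	assert_eq!(offset, "[q0fout]setpts=PTS+12.5/TB[q0pts];");
}
```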
diff --git a/src/render/mod.rs b/src/render/mod.rs
index bd22aff10623e9d77b84089f2b2a8f6abac6e54f..15fdf80889c8f81b5843bc155c607d899f8cdcec 100644
--- a/src/render/mod.rs
+++ b/src/render/mod.rs
@@ -7,8 +7,9 @@ use self::{
 };
 use crate::{
 	iotro::{intro, outro},
+	question::Question,
 	render::ffmpeg::{Ffmpeg, FfmpegInput},
-	time::{format_date, Time},
+	time::{format_date, format_time, Time},
 	Project, ProjectLecture, ProjectSourceMetadata, Resolution
 };
 use anyhow::{bail, Context};
@@ -34,6 +35,10 @@ const TRANSITION_LEN: Time = Time {
 	seconds: 0,
 	micros: 200_000
 };
+const QUESTION_FADE_LEN: Time = Time {
+	seconds: 0,
+	micros: 400_000
+};
 const FF_MULTIPLIER: usize = 8;
 // logo sizes at full hd, will be scaled to source resolution
 const FF_LOGO_SIZE: usize = 128;
@@ -152,14 +157,13 @@ fn svg2mkv(
 	ffmpeg.run()
 }
 
-fn svg2png(svg: &Path, png: &Path, size: usize) -> anyhow::Result<()> {
+fn svg2png(svg: &Path, png: &Path, width: usize, height: usize) -> anyhow::Result<()> {
 	let mut cmd = cmd();
-	let size = size.to_string();
 	cmd.arg("inkscape")
 		.arg("-w")
-		.arg(&size)
+		.arg(width.to_string())
 		.arg("-h")
-		.arg(&size);
+		.arg(height.to_string());
 	cmd.arg(svg).arg("-o").arg(png);
 
 	let status = cmd.status()?;
@@ -218,6 +222,14 @@ impl<'a> Renderer<'a> {
 		self.target.join("outro.mkv")
 	}
 
+	fn question_svg(&self, q_idx: usize) -> PathBuf {
+		self.target.join(format!("question{q_idx}.svg"))
+	}
+
+	fn question_png(&self, q_idx: usize) -> PathBuf {
+		self.target.join(format!("question{q_idx}.png"))
+	}
+
 	pub(crate) fn preprocess(&self, project: &mut Project) -> anyhow::Result<()> {
 		assert!(!project.progress.preprocessed);
 
@@ -266,6 +278,12 @@ impl<'a> Renderer<'a> {
 			source_res,
 			source_sample_rate
 		});
+
+		Ok(())
+	}
+
+	/// Prepare assets like intro, outro and questions.
+	pub(crate) fn render_assets(&self, project: &Project) -> anyhow::Result<()> {
 		let metadata = project.source.metadata.as_ref().unwrap();
 
 		println!();
@@ -279,7 +297,7 @@ impl<'a> Renderer<'a> {
 		let intro_svg = self.target.join("intro.svg");
 		fs::write(
 			&intro_svg,
-			intro(source_res, &project.lecture)
+			intro(metadata.source_res, &project.lecture)
 				.to_string_pretty()
 				.into_bytes()
 		)?;
@@ -290,7 +308,7 @@ impl<'a> Renderer<'a> {
 		let outro_svg = self.target.join("outro.svg");
 		fs::write(
 			&outro_svg,
-			outro(&project.lecture.lang, source_res)
+			outro(&project.lecture.lang, metadata.source_res)
 				.to_string_pretty()
 				.into_bytes()
 		)?;
@@ -304,7 +322,8 @@ impl<'a> Renderer<'a> {
 			include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/logo.svg"))
 		)?;
 		let logo_png = self.target.join("logo.png");
-		svg2png(&logo_svg, &logo_png, LOGO_SIZE * source_res.width() / 1920)?;
+		let logo_size = LOGO_SIZE * metadata.source_res.width() / 1920;
+		svg2png(&logo_svg, &logo_png, logo_size, logo_size)?;
 
 		// copy fastforward then render to png
 		let fastforward_svg = self.target.join("fastforward.svg");
@@ -316,12 +335,31 @@ impl<'a> Renderer<'a> {
 			))
 		)?;
 		let fastforward_png = self.target.join("fastforward.png");
+		let ff_logo_size = FF_LOGO_SIZE * metadata.source_res.width() / 1920;
 		svg2png(
 			&fastforward_svg,
 			&fastforward_png,
-			FF_LOGO_SIZE * source_res.width() / 1920
+			ff_logo_size,
+			ff_logo_size
 		)?;
 
+		// write questions then render to png
+		for (q_idx, (_, _, q_text)) in project.source.questions.iter().enumerate() {
+			let q = Question::new(metadata.source_res, &project.lecture.lang, q_text)
+				.finish()
+				.to_string_pretty()
+				.into_bytes();
+			let q_svg = self.question_svg(q_idx);
+			let q_png = self.question_png(q_idx);
+			fs::write(&q_svg, q)?;
+			svg2png(
+				&q_svg,
+				&q_png,
+				metadata.source_res.width(),
+				metadata.source_res.height()
+			)?;
+		}
+
 		Ok(())
 	}
 
@@ -361,21 +399,24 @@ impl<'a> Renderer<'a> {
 		let mut part3: Cow<'static, str> = outro.into();
 
 		// the recording is fun because of all the fast forwarding
-		let mut part2 = VecDeque::new();
+		let mut part2 = VecDeque::<Cow<'static, str>>::new();
+		let mut part2_ts = VecDeque::new();
 		let mut part2_start_of_the_end = None;
 		let mut part2_end_of_the_start = None;
 
 		// ok so ff is fun. we will add the ff'ed section as well as the part between
-		// the previous ff'ed section and our new section, unless we are the first
+		// the previous ff'ed section and our new section, unless we are the first.
 		project.source.fast.sort();
 		for (i, (ff_st, ff_end)) in project.source.fast.iter().rev().enumerate() {
 			if let Some(prev_end) = part2_end_of_the_start {
+				let duration = prev_end - *ff_end;
 				let recffbetween = ffmpeg.add_input(FfmpegInput {
 					start: Some(*ff_end),
-					duration: Some(prev_end - *ff_end),
+					duration: Some(duration),
 					..FfmpegInput::new(rec_file.clone())
 				});
 				part2.push_front(recffbetween.into());
+				part2_ts.push_front(Some((*ff_end, duration)));
 			} else {
 				part2_start_of_the_end = Some(*ff_end);
 			}
@@ -395,6 +436,7 @@ impl<'a> Renderer<'a> {
 				output: recff.clone().into()
 			});
 			part2.push_front(recff.into());
+			part2_ts.push_front(None);
 		}
 
 		// if the recording was not ff'ed, perform a normal trim
@@ -409,23 +451,109 @@
 				..FfmpegInput::new(rec_file.clone())
 			});
 			part2.push_back(rectrim.into());
+			part2_ts.push_back(Some((start, part2_last_part_duration)));
 		}
 		// otherwise add the first and last parts separately
 		else {
+			let duration = part2_end_of_the_start.unwrap() - start;
 			let rectrimst = ffmpeg.add_input(FfmpegInput {
 				start: Some(start),
-				duration: Some(part2_end_of_the_start.unwrap() - start),
+				duration: Some(duration),
 				..FfmpegInput::new(rec_file.clone())
 			});
 			part2.push_front(rectrimst.into());
+			part2_ts.push_front(Some((start, duration)));
 
-			part2_last_part_duration = end - part2_start_of_the_end.unwrap();
+			let part2_start_of_the_end = part2_start_of_the_end.unwrap();
+			part2_last_part_duration = end - part2_start_of_the_end;
 			let rectrimend = ffmpeg.add_input(FfmpegInput {
-				start: Some(part2_start_of_the_end.unwrap()),
+				start: Some(part2_start_of_the_end),
 				duration: Some(part2_last_part_duration),
 				..FfmpegInput::new(rec_file.clone())
 			});
 			part2.push_back(rectrimend.into());
+			part2_ts.push_back(Some((part2_start_of_the_end, part2_last_part_duration)));
+		}
+
+		// ok now we have a bunch of parts and a bunch of questions that want to get
+		// overlaid onto those parts.
+		project.source.questions.sort();
+		let mut q_idx = 0;
+		for (i, ts) in part2_ts.iter().enumerate() {
+			let Some((start, duration)) = ts else {
+				continue;
+			};
+			while q_idx < project.source.questions.len() {
+				let (q_start, q_end, _) = &project.source.questions[q_idx];
+				if q_start < start {
+					bail!(
+						"Question starting at {} did not fit into the video",
+						format_time(*q_start)
+					);
+				}
+				if q_start >= start && *q_end <= *start + *duration {
+					// add the question as input to ffmpeg
+					let q_inp = ffmpeg.add_input(FfmpegInput {
+						loop_input: true,
+						fps: Some(project.source.metadata.as_ref().unwrap().source_fps),
+						duration: Some(*q_end - *q_start),
+						..FfmpegInput::new(self.question_png(q_idx))
+					});
+
+					// fade in the question
+					let q_fadein = format!("q{q_idx}fin");
+					ffmpeg.add_filter(Filter::FadeAlpha {
+						input: q_inp.into(),
+						direction: "in",
+						start: Time {
+							seconds: 0,
+							micros: 0
+						},
+						duration: QUESTION_FADE_LEN,
+						output: q_fadein.clone().into()
+					});
+
+					// fade out the question
+					let q_fadeout = format!("q{q_idx}fout");
+					ffmpeg.add_filter(Filter::FadeAlpha {
+						input: q_fadein.into(),
+						direction: "out",
+						start: *q_end - *q_start - QUESTION_FADE_LEN,
+						duration: QUESTION_FADE_LEN,
+						output: q_fadeout.clone().into()
+					});
+
+					// move the question to the correct timestamp
+					let q_pts = format!("q{q_idx}pts");
+					ffmpeg.add_filter(Filter::VideoOffset {
+						input: q_fadeout.into(),
+						seconds: *q_start - *start,
+						output: q_pts.clone().into()
+					});
+
+					// overlay the question onto this part
+					let q_overlay = format!("q{q_idx}o");
+					ffmpeg.add_filter(Filter::Overlay {
+						video_input: part2[i].clone(),
+						overlay_input: q_pts.into(),
+						x: "0".into(),
+						y: "0".into(),
+						repeatlast: false,
+						output: q_overlay.clone().into()
+					});
+					part2[i] = q_overlay.into();
+
+					q_idx += 1;
+					continue;
+				}
+				break;
+			}
+		}
+		if q_idx < project.source.questions.len() {
+			bail!(
+				"Question starting at {} did not fit into the video before it was over",
+				format_time(project.source.questions[q_idx].0)
+			);
 		}
 
 		// fade out the intro
@@ -503,6 +634,7 @@ impl<'a> Renderer<'a> {
 			overlay_input: logoalpha.into(),
 			x: format!("main_w-overlay_w-{overlay_off_x}").into(),
 			y: format!("main_h-overlay_h-{overlay_off_y}").into(),
+			repeatlast: true,
 			output: overlay.into()
 		});
 
diff --git a/tmp.sh b/tmp.sh
index df71030a579b6cace094c34b9acdda24633b5cdc..3320104c77de8e2d0da95687f6010519426733f4 100755
--- a/tmp.sh
+++ b/tmp.sh
@@ -4,11 +4,11 @@ set -euo pipefail
 rm tmp.mkv || true
 
 ffmpeg -hide_banner \
-	-loop 1 -r 25 -t 4 -i question.png \
+	-loop 1 -r 25 -t 3 -i question.png \
 	-filter_complex "
-		gradients=s=2560x1440:d=4:c0=#000055:c1=#005500:x0=480:y0=540:x1=1440:y1=540[input];
-		[0]fade=t=in:st=0:d=1:alpha=1,fade=t=out:st=3:d=1:alpha=1[overlay];
-		[input][overlay]overlay=eval=frame:x=0:y=0[v]
+		gradients=s=2560x1440:d=5:c0=#000055:c1=#005500:x0=480:y0=540:x1=1440:y1=540[input];
+		[0]fade=t=in:st=1:d=0.5:alpha=1,fade=t=out:st=2.5:d=0.5:alpha=1,setpts=PTS+1/TB[overlay];
+		[input][overlay]overlay=eval=frame:x=0:y=0:repeatlast=0[v]
 	" \
 	-map "[v]" \
 	-c:v libsvtav1 -preset 1 -crf 18 \