diff --git a/iml/chapters/generative-modeling.tex b/iml/chapters/generative-modeling.tex
index de08eaa..8f860b7 100644
--- a/iml/chapters/generative-modeling.tex
+++ b/iml/chapters/generative-modeling.tex
@@ -53,4 +53,6 @@ \subsection*{Generative vs. Discriminative}
 
 \textbf{Generative models}:
 
-$p(x,y)$, can be more powerful (dectect outliers, missing values) if assumptions are met, are typically less robust against outliers
+$p(x,y)$; can be more powerful (detect outliers, handle missing values) if the model assumptions are met; typically less robust against outliers
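+% A sketch of the ``more powerful'' claim: the joint yields both the class posterior and the marginal.
+Bayes: $p(y \mid x) = \frac{p(x,y)}{\sum_{y'} p(x,y')}$; a low marginal $p(x) = \sum_y p(x,y)$ flags outliers.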
diff --git a/iml/chapters/various.tex b/iml/chapters/various.tex
index e3ff887..3700061 100644
--- a/iml/chapters/various.tex
+++ b/iml/chapters/various.tex
@@ -50,5 +50,7 @@ \section*{Various}
 $M \in \mathbb{R}^{n\times n}$ PSD $\Leftrightarrow \forall x \in \mathbb{R}^n: x^\top Mx \geq 0 \\
 \Leftrightarrow$ all principal minors of $M$ have non-negative determinant $\Leftrightarrow \lambda \geq 0 \ \forall \lambda\in\sigma(M)$
 
-\textbf{CLT} For $X_i$ iid with $m = \E[X_1]$ and $\Var[X_1] = \sigma^2$: $\mathbb{P}\left[\frac{\sum_{i=1}^n X_i - n m}{\sqrt{\sigma^2 n}} \leq a\right] \xrightarrow[n \to \infty]{} \Phi(a)$.
-\textbf{KL Divergence} $D_{KL}(P||Q) = \mathbb{E}_p[\log(\frac{p(x)}{q(x)})]$, 0 iff $P = Q$, always non-negative
\ No newline at end of file
+\textbf{CLT} For $X_i$ iid with $m = \E[X_1]$ and $\text{Var}(X_1) = \sigma^2$: $\mathbb{P}\left[\frac{\sum_{i=1}^n X_i - n m}{\sqrt{\sigma^2 n}} \leq a\right] \xrightarrow[n \to \infty]{} \Phi(a)$.
+\textbf{KL Divergence} $D_{KL}(P\|Q) = \mathbb{E}_p[\log(\frac{p(x)}{q(x)})] \geq 0$, with equality iff $P = Q$
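+% Non-negativity sketch (assumes $p \ll q$; Jensen's inequality with convex $-\log$):
+$D_{KL}(P\|Q) = \mathbb{E}_p[-\log \frac{q(x)}{p(x)}] \geq -\log \mathbb{E}_p[\frac{q(x)}{p(x)}] = -\log 1 = 0$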