diff --git a/README.md b/README.md
index 63ee098..5769c92 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@
 </div>
 
 <div style="text-align: center;">
-    <h1>Mambular: Tabular Deep Learning</h1>
+    <h1>Mambular: Tabular Deep Learning Made Simple</h1>
 </div>
 
 Mambular is a Python library for tabular deep learning. It includes models that leverage the Mamba (State Space Model) architecture, as well as other popular models like TabTransformer, FTTransformer, TabM, and tabular ResNets. Check out our paper `Mambular: A Sequential Model for Tabular Deep Learning`, available [here](https://arxiv.org/abs/2408.06291). Also check out our paper introducing [TabulaRNN](https://arxiv.org/pdf/2411.17207) and analyzing the efficiency of NLP-inspired tabular models.
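Since the README paragraph above introduces the library, a minimal usage sketch may help readers of this patch. It assumes the sklearn-style estimator API (`MambularClassifier` with `fit`/`predict`) that the project documents; treat the exact import path, toy data, and `max_epochs` keyword as assumptions, not a verbatim excerpt from the docs.

```python
# Hedged sketch: assumes mambular exposes sklearn-style estimators
# (MambularClassifier with fit/predict); import path and kwargs assumed.
import numpy as np
import pandas as pd
from mambular.models import MambularClassifier  # assumed import path

# Toy tabular data: two numerical features and one categorical feature.
X = pd.DataFrame({
    "age": np.random.randint(18, 80, size=256),
    "income": np.random.rand(256) * 1e5,
    "segment": np.random.choice(["a", "b", "c"], size=256),
})
y = np.random.randint(0, 2, size=256)

model = MambularClassifier()     # default hyperparameters
model.fit(X, y, max_epochs=10)   # assumed fit signature
preds = model.predict(X)
```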
diff --git a/mambular/__version__.py b/mambular/__version__.py
index 095e93c..23b9f4b 100644
--- a/mambular/__version__.py
+++ b/mambular/__version__.py
@@ -1,4 +1,4 @@
 """Version information."""
 
 # The following line *must* be the last in the module, exactly as formatted:
-__version__ = "0.2.3"
+__version__ = "1.0.0"
diff --git a/mambular/arch_utils/layer_utils/embedding_layer.py b/mambular/arch_utils/layer_utils/embedding_layer.py
index 478a70f..83b84ac 100644
--- a/mambular/arch_utils/layer_utils/embedding_layer.py
+++ b/mambular/arch_utils/layer_utils/embedding_layer.py
@@ -54,7 +54,7 @@ def __init__(self, num_feature_info, cat_feature_info, config):
                 d_embedding=self.d_model,
                 n_frequencies=getattr(config, "n_frequencies", 48),
                 frequency_init_scale=getattr(config, "frequency_init_scale", 0.01),
-                activation=self.embedding_activation,
+                activation=True,
                 lite=getattr(config, "plr_lite", False),
             )
         elif self.embedding_type == "linear":
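The `activation=True` change above reads as an adaptation to an embedding backend whose `activation` parameter is a boolean flag rather than an `nn.Module`. The surrounding keywords (`n_frequencies`, `frequency_init_scale`, `lite`) match the PLR-style periodic embeddings of `rtdl_num_embeddings.PeriodicEmbeddings`, so the sketch below shows the call under that assumption; the class identity and the shapes used are assumptions, since the hunk does not show the constructor name.

```python
# Hedged sketch: assumes the hunk constructs something like
# rtdl_num_embeddings.PeriodicEmbeddings, where `activation` is a bool
# (apply a nonlinearity after the periodic features) rather than a
# module instance -- hence activation=True instead of a torch module.
import torch
from rtdl_num_embeddings import PeriodicEmbeddings

n_features, d_model = 8, 32  # assumed values for illustration
emb = PeriodicEmbeddings(
    n_features,
    d_embedding=d_model,
    n_frequencies=48,
    frequency_init_scale=0.01,
    activation=True,   # boolean flag in this API, not an nn.Module
    lite=False,
)
x = torch.randn(4, n_features)  # (batch, n_features)
out = emb(x)                    # (batch, n_features, d_model)
```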
diff --git a/mambular/configs/mlp_config.py b/mambular/configs/mlp_config.py
index fd1fe09..dc5e458 100644
--- a/mambular/configs/mlp_config.py
+++ b/mambular/configs/mlp_config.py
@@ -62,7 +62,7 @@ class DefaultMLPConfig:
     weight_decay: float = 1e-06
     lr_factor: float = 0.1
     layer_sizes: list = (256, 128, 32)
-    activation: callable = nn.SELU()
+    activation: callable = nn.ReLU()
     skip_layers: bool = False
     dropout: float = 0.2
     use_glu: bool = False
@@ -76,5 +76,4 @@ class DefaultMLPConfig:
     embedding_bias: bool = False
     layer_norm_after_embedding: bool = False
     d_model: int = 32
-    embedding_type: float = "plr"
     plr_lite: bool = False
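For the config hunks, a short sketch of how `DefaultMLPConfig` behaves after this change: the default activation is now `nn.ReLU()`, and `embedding_type` is no longer a field. Field names and values come from the hunks above; the import path and the dataclass construction are assumptions.

```python
# Hedged sketch: post-change defaults of DefaultMLPConfig.
# Import path assumed; field names/values taken from the hunks above.
import torch.nn as nn
from mambular.configs.mlp_config import DefaultMLPConfig

cfg = DefaultMLPConfig()                 # activation now defaults to nn.ReLU()
assert isinstance(cfg.activation, nn.ReLU)

# embedding_type was removed from the config, so callers that still pass
# it as a keyword would now fail:
# DefaultMLPConfig(embedding_type="plr")  # -> TypeError after this diff

cfg_custom = DefaultMLPConfig(layer_sizes=(512, 256, 64), dropout=0.1)
```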