Updating Docs and adding new examples
erfanzar committed May 21, 2024
1 parent e7f7b3d commit 2588d03
Showing 111 changed files with 16,330 additions and 15,008 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -192,7 +192,7 @@ print(f"Hey ! , here's where your model saved {output.checkpoint_path}")
```
> [!NOTE]
- > You Can use Lora too, both for DPO and SFT Trainers.
+ > You Can use Lora too, for DPO, ORPO and SFT Trainers.
## FineTuning
2 changes: 2 additions & 0 deletions docs/generated-cli-cli.md
@@ -0,0 +1,2 @@
+ # cli.cli
+ ::: src.python.easydel.cli.cli
12 changes: 6 additions & 6 deletions generate_documentations.py
@@ -121,11 +121,11 @@ def main():
handlers:
python:
options:
- docstring_style: sphinx
+ docstring_style: google
repo_url: https://github.com/erfanzar/EasyDeL
site_author: Erfan Zare Chavoshi
- site_name: easydel
+ site_name: EasyDeL
copyright: Erfan Zare Chavoshi-easydel
theme:
@@ -138,9 +138,9 @@ def main():

statics = {
("Home",): "index.md",
("install",): "Install.md",
("AvailableModels",): "AvailableModels.md",
("EasyBIT",): "Bits.md",
("Install",): "Install.md",
("Available models",): "AvailableModels.md",
("Easy Bits",): "Bits.md",
("Examples", "EasyState"): "EasyStateExample.md",
("Examples", "LoRA and Transfer Learning"): "LoRA-TransferLearningExample.md",
("Examples", "Fine Tuning Example"): "FineTuningExample.md",
@@ -154,7 +154,7 @@ def main():
("Examples", "MosaicMPT Models"): "MosaicMPT.md",
("Examples", "Easy Attention"): "AttentionModuleExample.md",
("Examples", "Model Parameter Quantization"): "Parameter-Quantization.md",
("CONTRIBUTING",): "CONTRIBUTING.md"
("Contributing",): "CONTRIBUTING.md"

}
cache = {("APIs",) + k: v for k, v in cache.items()}
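For readers who have not opened the generator script: `statics` maps tuples of navigation labels to markdown files, and this commit only renames a few of those labels. The script's actual conversion logic is not part of the diff; the sketch below shows one plausible way such a tuple-keyed mapping could be turned into the nested `nav:` entries that appear in mkdocs.yml below (the helper name `tuples_to_nav` is hypothetical):

```python
# Hypothetical helper -- not the commit's code. Turns {("Section", "Page"): "file.md"}
# into the nested list-of-dicts structure mkdocs expects under `nav:`.
def tuples_to_nav(pages: dict) -> list:
    tree: dict = {}
    for labels, target in pages.items():
        node = tree
        for label in labels[:-1]:          # walk/create intermediate sections
            node = node.setdefault(label, {})
        node[labels[-1]] = target          # leaf: page label -> markdown file

    def to_nav(node):
        return [
            {label: (child if isinstance(child, str) else to_nav(child))}
            for label, child in node.items()
        ]

    return to_nav(tree)


statics = {("Home",): "index.md", ("Examples", "EasyState"): "EasyStateExample.md"}
print(tuples_to_nav(statics))
# [{'Home': 'index.md'}, {'Examples': [{'EasyState': 'EasyStateExample.md'}]}]
```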
14 changes: 8 additions & 6 deletions mkdocs.yml
@@ -1,5 +1,7 @@
nav:
- APIs:
+ - Cli:
+ - Cli: generated-cli-cli.md
- Data Preprocessing:
- Processor: generated-data_preprocessing-_processor.md
- Etils:
@@ -188,9 +190,9 @@ nav:
- Prompters: generated-utils-prompters.md
- Tensor Utils: generated-utils-tensor_utils.md
- Utils: generated-utils-utils.md
- - AvailableModels: AvailableModels.md
- - CONTRIBUTING: CONTRIBUTING.md
- - EasyBIT: Bits.md
+ - Available models: AvailableModels.md
+ - Contributing: CONTRIBUTING.md
+ - Easy Bits: Bits.md
- Examples:
- DataProcessing: DataProcessing.md
- Easy Attention: AttentionModuleExample.md
Expand All @@ -206,19 +208,19 @@ nav:
- MosaicMPT Models: MosaicMPT.md
- PytorchServer: PyTorchServer.md
- Home: index.md
- - install: Install.md
+ - Install: Install.md

plugins:
- search
- mkdocstrings:
handlers:
python:
options:
- docstring_style: sphinx
+ docstring_style: google

repo_url: https://github.com/erfanzar/EasyDeL
site_author: Erfan Zare Chavoshi
- site_name: easydel
+ site_name: EasyDeL
copyright: Erfan Zare Chavoshi-easydel

theme:
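Both the generator script and mkdocs.yml now point mkdocstrings at Google-style docstrings instead of Sphinx-style ones, which is what the docstring rewrites in the next two files implement. As a rough, illustrative comparison (not taken from the commit), the same function documented in each style looks like this:

```python
def scale(value: float, factor: float = 2.0) -> float:
    """Scale a number (Sphinx-style docstring, the old setting).

    :param value: Number to scale.
    :param factor: Multiplier applied to ``value``.
    :return: The scaled number.
    """
    return value * factor


def scale_google(value: float, factor: float = 2.0) -> float:
    """Scale a number (Google-style docstring, the new setting).

    Args:
        value: Number to scale.
        factor: Multiplier applied to ``value``.

    Returns:
        The scaled number.
    """
    return value * factor
```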
30 changes: 18 additions & 12 deletions src/python/easydel/etils/auto_tx.py
@@ -20,20 +20,26 @@ def get_optimizer_and_scheduler(
weight_decay: float = 0.02,
warmup_steps: int = 0
):
"""
The get_optimizer_and_scheduler function is a helper function that returns an optimizer and scheduler
"""The get_optimizer_and_scheduler function is a helper function that returns an optimizer and scheduler
based on the parameters passed to it.
- :param optimizer: AVAILABLE_OPTIMIZERS: Choose the optimizer
- :param scheduler: AVAILABLE_SCHEDULERS: Determine the learning rate scheduler
- :param steps: int: Specify the number of steps in the training process
- :param learning_rate: float: Set the learning rate for the optimizer
- :param learning_rate_end: float: Set the final learning rate
- :param gradient_accumulation_steps: int: Accumulate the gradients before updating the weights
- :param extra_optimizer_kwargs: dict | None: Pass extra arguments to the optimizer
- :param weight_decay: float: Set the weight decay for adamw optimizer
- :param warmup_steps: int: Specify the number of steps to warm up the learning rate
- :return: A tuple of two objects: (Optimizer and scheduler)
+ Args:
+     optimizer: AVAILABLE_OPTIMIZERS: Choose the optimizer
+     scheduler: AVAILABLE_SCHEDULERS: Determine the learning rate
+         scheduler
+     steps: int: Specify the number of steps in the training process
+     learning_rate: float: Set the learning rate for the optimizer
+     learning_rate_end: float: Set the final learning rate
+     gradient_accumulation_steps: int: Accumulate the gradients
+         before updating the weights
+     extra_optimizer_kwargs: dict | None: Pass extra arguments to the
+         optimizer
+     weight_decay: float: Set the weight decay for adamw optimizer
+     warmup_steps: int: Specify the number of steps to warm up the
+         learning rate
+ Returns:
+     A tuple of two objects: (Optimizer and scheduler)
"""
if extra_optimizer_kwargs is None:
extra_optimizer_kwargs = {}
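For orientation, here is a minimal usage sketch of the function whose docstring is being converted above. The optimizer/scheduler names and hyperparameter values are assumptions for illustration, not taken from the commit, and the import path assumes the package is installed as `easydel`:

```python
# Illustrative sketch only. "adamw" and "linear" are assumed members of
# AVAILABLE_OPTIMIZERS / AVAILABLE_SCHEDULERS; check easydel.etils for the
# actual accepted names before relying on them.
from easydel.etils.auto_tx import get_optimizer_and_scheduler

tx, scheduler = get_optimizer_and_scheduler(
    optimizer="adamw",
    scheduler="linear",
    steps=10_000,
    learning_rate=2e-5,
    learning_rate_end=2e-6,
    gradient_accumulation_steps=1,
    weight_decay=0.02,
    warmup_steps=500,
)
# Per the docstring, the return value is a tuple of (optimizer, scheduler).
```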
13 changes: 7 additions & 6 deletions src/python/easydel/etils/configs.py
@@ -396,13 +396,14 @@


def get_config(model_type: str, struct: str):
"""
The get_config function takes in a model_type and struct, and returns the corresponding config.
"""The get_config function takes in a model_type and struct, and returns the corresponding config.
Args:
model_type: str: Determine which model to use
struct: str: Specify the structure of the model
:param model_type: str: Determine which model to use
:param struct: str: Specify the structure of the model
:return: A dictionary of hyperparameters
Returns:
A dictionary of hyperparameters
"""
if model_type == "llama":
return llama_configs[struct]
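A quick usage sketch of `get_config`: the `"llama"` model type appears in the diff itself, but the `struct` key `"7b"` below is a guess — the valid keys are whatever the `llama_configs` dictionary in `easydel.etils.configs` defines.

```python
# Illustrative sketch only; "7b" is a hypothetical struct key.
from easydel.etils.configs import get_config

llama_hparams = get_config(model_type="llama", struct="7b")
print(llama_hparams)  # per the docstring: a dictionary of hyperparameters
```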