Skip to content

Commit

Permalink
Merge pull request confident-ai#138 from confident-ai/feature/refactor-ragas
Browse files Browse the repository at this point in the history

update ragas
  • Loading branch information
ColabDog authored Sep 25, 2023
2 parents 3afba87 + 3a7ea4d commit 4b1ccb3
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 16 deletions.
2 changes: 1 addition & 1 deletion deepeval/_version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__: str = "0.17.4"
__version__: str = "0.17.5"
30 changes: 15 additions & 15 deletions deepeval/metrics/ragas_metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from typing import List


class ContextualRelevancyRagasMetric(Metric):
class RagasContextualRelevancyMetric(Metric):
"""This metric checks the contextual relevancy using Ragas"""

def __init__(
Expand Down Expand Up @@ -64,10 +64,10 @@ def is_successful(self):

@property
def __name__(self):
return "Contextual Relevancy Ragas Score"
return "Ragas Contextual Relevancy Score"


class AnswerRelevancyRagasMetric(Metric):
class RagasAnswerRelevancyMetric(Metric):
"""This metric checks the answer relevancy using Ragas"""

def __init__(
Expand Down Expand Up @@ -117,10 +117,10 @@ def is_successful(self):

@property
def __name__(self):
return "Answer Relevancy Ragas Score"
return "Ragas Answer Relevancy Score"


class FaithfulnessRagasMetric(Metric):
class RagasFaithfulnessMetric(Metric):
def __init__(
self,
minimum_score: float = 0.3,
Expand Down Expand Up @@ -168,10 +168,10 @@ def is_successful(self):

@property
def __name__(self):
return "Faithfulness Ragas Score"
return "Ragas Faithfulness Score"


class ContextRecallRagasMetric(Metric):
class RagasContextRecallMetric(Metric):
"""This metric checks the context recall using Ragas"""

def __init__(
Expand Down Expand Up @@ -221,10 +221,10 @@ def is_successful(self):

@property
def __name__(self):
return "Context Recall Ragas Score"
return "Ragas Context Recall Score"


class HarmfulnessRagasMetric(Metric):
class RagasHarmfulnessMetric(Metric):
"""This metric checks the harmfulness using Ragas"""

def __init__(
Expand Down Expand Up @@ -274,7 +274,7 @@ def is_successful(self):

@property
def __name__(self):
return "Harmfulness Ragas Score"
return "Ragas Harmfulness Score"


class RagasMetric(Metric):
Expand All @@ -288,11 +288,11 @@ def __init__(
self.minimum_score = minimum_score
if metrics is None:
self.metrics = [
HarmfulnessRagasMetric,
ContextRecallRagasMetric,
FaithfulnessRagasMetric,
AnswerRelevancyRagasMetric,
ContextualRelevancyRagasMetric,
RagasHarmfulnessMetric,
RagasContextRecallMetric,
RagasFaithfulnessMetric,
RagasAnswerRelevancyMetric,
RagasContextualRelevancyMetric,
]
else:
self.metrics = metrics
Expand Down
34 changes: 34 additions & 0 deletions docs/docs/measuring_llm_performance/ragas_score.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,37 @@ def test_overall_score():
)
```


## Individual Metrics

You can import and instantiate any of the following classes to use a specific Ragas metric on its own. Below is an example of how to do just that!

```python
from deepeval.metrics.ragas_metric import RagasContextualRelevancyMetric
from deepeval.metrics.ragas_metric import RagasAnswerRelevancyMetric
from deepeval.metrics.ragas_metric import RagasFaithfulnessMetric
from deepeval.metrics.ragas_metric import RagasContextRecallMetric
from deepeval.metrics.ragas_metric import RagasHarmfulnessMetric

def test_individual_metrics():
test_case = LLMTestCase(
query=query,
output=output,
expected_output=expected_output,
context=context,
)
metrics = [
RagasContextualRelevancyMetric(),
RagasAnswerRelevancyMetric(),
RagasFaithfulnessMetric(),
RagasContextRecallMetric(),
RagasHarmfulnessMetric(),
]
for metric in metrics:
score = metric.measure(test_case)
print(f"{metric.__name__}: {score}")
```

This will run each metric against the test case and print the individual score for every metric.


0 comments on commit 4b1ccb3

Please sign in to comment.