Commit
* langchain example using langchain expression language
ww-jermaine committed Sep 28, 2023
1 parent 43001ac commit ed1782f
Showing 1 changed file with 47 additions and 0 deletions.
47 changes: 47 additions & 0 deletions examples/langchain_lcel.py
@@ -0,0 +1,47 @@
from langchain import chat_models, prompts, schema

from llm_guard.input_scanners import Toxicity


class LLMGuardPromptInvalidException(Exception):
    """Exception to raise when llm-guard marks the prompt invalid."""


class LLMGuardOutputInvalidException(Exception):
    """Exception to raise when llm-guard marks the result invalid."""


def scan_input_toxicity_scanner(params) -> str:
    text = params.get("text", "")
    threshold = float(params.get("threshold", 0.7))
    scanner = Toxicity(threshold)
    sanitized_input, is_valid, risk_score = scanner.scan(text)
    print(risk_score)
    if is_valid:
        return sanitized_input
    else:
        raise LLMGuardPromptInvalidException(
            f"The input text '{text}' was determined to be toxic with risk score {risk_score}"
        )


chain = (
    prompts.ChatPromptTemplate.from_template("Reverse the following string: {text}")
    | chat_models.ChatOpenAI()
    | schema.output_parser.StrOutputParser()
)

guard_chain = scan_input_toxicity_scanner | schema.output_parser.StrOutputParser()

overall_chain = {"text": guard_chain} | chain


try:
    input_text = "Hello, world!"
    threshold = 0.0
    output = overall_chain.invoke({"text": input_text, "threshold": threshold})
    print(output)
except LLMGuardPromptInvalidException as e:
    print(f"Prompt invalid: {str(e)}")
except Exception as e:
    print(f"An error occurred: {str(e)}")
