Skip to content

Commit

Permalink
update website getting started basic example
Browse files Browse the repository at this point in the history
  • Loading branch information
diptanu committed Aug 15, 2024
1 parent 9cf0730 commit 2ec931c
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 2 deletions.
4 changes: 3 additions & 1 deletion docs/docs/getting-started-basic.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -186,6 +186,8 @@ extraction_policies:
- extractor: "tensorlake/llama_cpp"
name: "entity-extractor"
input_params:
+            model: "NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF"
+            filename: "*Q8_0.gguf"
system_prompt: "Extract entities from text, and return the output in JSON format."
n_ctx: 50000
- extractor: "tensorlake/chunk-extractor"
Expand Down Expand Up @@ -326,7 +328,7 @@ from indexify import IndexifyClient
from llama_cpp import Llama
client = IndexifyClient()
- llm = Llama.from_pretrained(repo_id='microsoft/Phi-3-mini-4k-instruct-gguf', filename='*q4.gguf', verbose=False, n_ctx=2048)
+ llm = Llama.from_pretrained(repo_id='NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF', filename='*Q8_0.gguf', verbose=False, n_ctx=2048)
# Get entities
ingested_content_list = client.list_content("wiki_extraction_pipeline")
Expand Down
2 changes: 2 additions & 0 deletions examples/getting_started/website/basic/graph.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ extraction_policies:
- extractor: "tensorlake/llama_cpp"
name: "entity-extractor"
input_params:
+    model: "NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF"
+    filename: "*Q8_0.gguf"
system_prompt: "Extract entities from text, and return the output in JSON format."
n_ctx: 50000
- extractor: "tensorlake/chunk-extractor"
Expand Down
2 changes: 1 addition & 1 deletion examples/getting_started/website/basic/query.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from llama_cpp import Llama

client = IndexifyClient()
- llm = Llama.from_pretrained(repo_id='microsoft/Phi-3-mini-4k-instruct-gguf', filename='*q4.gguf', verbose=False, n_ctx=2048)
+ llm = Llama.from_pretrained(repo_id='NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF', filename='*Q8_0.gguf', verbose=False, n_ctx=2048)

# Get entities
ingested_content_list = client.list_content("wiki_extraction_pipeline")
Expand Down

0 comments on commit 2ec931c

Please sign in to comment.