code quality fixes: line length = 80
Kye committed Nov 24, 2023
1 parent d97de1c commit 49c7b97
Showing 126 changed files with 1,706 additions and 728 deletions.
5 changes: 4 additions & 1 deletion playground/agents/mm_agent_example.py
@@ -9,6 +9,9 @@
 img = node.run_img("/image1", "What is this image about?")
 
 chat = node.chat(
-    "What is your name? Generate a picture of yourself. What is this image about?",
+    (
+        "What is your name? Generate a picture of yourself. What is this image"
+        " about?"
+    ),
     streaming=True,
 )
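The rewrap above is the pattern this commit applies everywhere: Python implicitly concatenates adjacent string literals, so the parenthesized fragments compile to exactly the same string as the original one-liner. A minimal standalone sketch of the equivalence (not from the repository):

    # Adjacent literals inside parentheses are joined at compile time.
    one_liner = "What is your name? Generate a picture of yourself. What is this image about?"
    wrapped = (
        "What is your name? Generate a picture of yourself. What is this image"
        " about?"
    )
    assert one_liner == wrapped  # same string, no runtime concatenation

Note that the separating space moves to the head of the continuation fragment (" about?"); dropping it would silently fuse the two words.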
10 changes: 8 additions & 2 deletions playground/agents/revgpt_agent.py
@@ -10,13 +10,19 @@
     "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
     "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
     "PUID": os.getenv("REVGPT_PUID"),
-    "unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")],
+    "unverified_plugin_domains": [
+        os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")
+    ],
 }
 
 llm = RevChatGPTModel(access_token=os.getenv("ACCESS_TOKEN"), **config)
 
 worker = Worker(ai_name="Optimus Prime", llm=llm)
 
-task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
+task = (
+    "What were the winning boston marathon times for the past 5 years (ending"
+    " in 2022)? Generate a table of the year, name, country of origin, and"
+    " times."
+)
 response = worker.run(task)
 print(response)
7 changes: 5 additions & 2 deletions playground/demos/accountant_team/accountant_team.py
@@ -103,7 +103,8 @@ def run(self):
 
         # Provide decision making support to the accountant
         decision_making_support_agent_output = decision_making_support_agent.run(
-            f"{self.decision_making_support_agent_instructions}: {summary_agent_output}"
+            f"{self.decision_making_support_agent_instructions}:"
+            f" {summary_agent_output}"
         )
 
         return decision_making_support_agent_output
@@ -113,5 +114,7 @@
     pdf_path="tesla.pdf",
     fraud_detection_instructions="Detect fraud in the document",
     summary_agent_instructions="Generate an actionable summary of the document",
-    decision_making_support_agent_instructions="Provide decision making support to the business owner:",
+    decision_making_support_agent_instructions=(
+        "Provide decision making support to the business owner:"
+    ),
 )
3 changes: 2 additions & 1 deletion playground/demos/ai_research_team/main.py
@@ -48,6 +48,7 @@
 
 paper = pdf_to_text(PDF_PATH)
 algorithmic_psuedocode_agent = paper_summarizer_agent.run(
-    f"Focus on creating the algorithmic pseudocode for the novel method in this paper: {paper}"
+    "Focus on creating the algorithmic pseudocode for the novel method in this"
+    f" paper: {paper}"
 )
 pytorch_code = paper_implementor_agent.run(algorithmic_psuedocode_agent)
18 changes: 14 additions & 4 deletions playground/demos/autotemp/autotemp.py
@@ -9,11 +9,18 @@ class AutoTemp:
     """
 
     def __init__(
-        self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6
+        self,
+        api_key,
+        default_temp=0.0,
+        alt_temps=None,
+        auto_select=True,
+        max_workers=6,
     ):
         self.api_key = api_key
         self.default_temp = default_temp
-        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
+        self.alt_temps = (
+            alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
+        )
         self.auto_select = auto_select
         self.max_workers = max_workers
         self.llm = OpenAIChat(
@@ -62,12 +69,15 @@ def run(self, prompt, temperature_string):
         if not scores:
             return "No valid outputs generated.", None
 
-        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
+        sorted_scores = sorted(
+            scores.items(), key=lambda item: item[1], reverse=True
+        )
        best_temp, best_score = sorted_scores[0]
         best_output = outputs[best_temp]
 
         return (
-            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
+            f"Best AutoTemp Output (Temp {best_temp} | Score:"
+            f" {best_score}):\n{best_output}"
             if self.auto_select
             else "\n".join(
                 f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
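The f-string splits in this file follow the same rule with one wrinkle: every fragment that interpolates a value keeps its own f prefix, while purely literal fragments may drop it, and the fragments still join into a single string. A small standalone illustration (the values are made up):

    best_temp, best_score, best_output = 0.8, 92.5, "sample output"
    # Only fragments containing {...} need the f prefix.
    msg = (
        f"Best AutoTemp Output (Temp {best_temp} | Score:"
        f" {best_score}):\n{best_output}"
    )
    assert msg == (
        f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
    )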
38 changes: 28 additions & 10 deletions playground/demos/blog_gen/blog_gen.py
@@ -7,7 +7,10 @@
 
 class BlogGen:
     def __init__(
-        self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2"
+        self,
+        api_key,
+        blog_topic,
+        temperature_range: str = "0.4,0.6,0.8,1.0,1.2",
     ):  # Add blog_topic as an argument
         self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
         self.auto_temp = AutoTemp(api_key)
@@ -40,7 +43,10 @@ def run_workflow(self):
         topic_output = topic_result.generations[0][0].text
         print(
             colored(
-                f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n",
+                (
+                    "\nTopic Selection Task"
+                    f" Output:\n----------------------------\n{topic_output}\n"
+                ),
                 "white",
             )
         )
@@ -58,7 +64,10 @@ def run_workflow(self):
         initial_draft_output = auto_temp_output  # Assuming AutoTemp.run returns the best output directly
         print(
             colored(
-                f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n",
+                (
+                    "\nInitial Draft"
+                    f" Output:\n----------------------------\n{initial_draft_output}\n"
+                ),
                 "white",
             )
         )
@@ -71,7 +80,10 @@ def run_workflow(self):
         review_output = review_result.generations[0][0].text
         print(
             colored(
-                f"\nReview Output:\n----------------------------\n{review_output}\n",
+                (
+                    "\nReview"
+                    f" Output:\n----------------------------\n{review_output}\n"
+                ),
                 "white",
             )
         )
@@ -80,22 +92,28 @@ def run_workflow(self):
         distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
             "{{ARTICLE_TOPIC}}", chosen_topic
         )
-        distribution_result = self.openai_chat.generate([distribution_prompt])
+        distribution_result = self.openai_chat.generate(
+            [distribution_prompt]
+        )
         distribution_output = distribution_result.generations[0][0].text
         print(
             colored(
-                f"\nDistribution Output:\n----------------------------\n{distribution_output}\n",
+                (
+                    "\nDistribution"
+                    f" Output:\n----------------------------\n{distribution_output}\n"
+                ),
                 "white",
             )
         )
 
         # Final compilation of the blog
-        final_blog_content = (
-            f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
-        )
+        final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
         print(
             colored(
-                f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n",
+                (
+                    "\nFinal Blog"
+                    f" Content:\n----------------------------\n{final_blog_content}\n"
+                ),
                 "green",
             )
         )
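The __init__ rewrites in this file and in autotemp.py use the standard "exploded" style for long signatures: one parameter per line, closing parenthesis on its own line, and a trailing comma after the last parameter so that adding a parameter later touches only one line of a future diff. A sketch of the shape (hypothetical class, body elided):

    class ExampleGen:  # stand-in for BlogGen
        def __init__(
            self,
            api_key,
            blog_topic,
            temperature_range: str = "0.4,0.6,0.8,1.0,1.2",  # trailing comma
        ):
            ...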
5 changes: 4 additions & 1 deletion (file path not shown)
@@ -4,7 +4,10 @@
 # Multi Modality Auto Agent
 llm = Idefics(max_length=2000)
 
-task = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+task = (
+    "User: What is in this image?"
+    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+)
 
 ## Initialize the workflow
 flow = Flow(
32 changes: 23 additions & 9 deletions playground/demos/nutrition/nutrition.py
@@ -10,9 +10,16 @@
 openai_api_key = os.getenv("OPENAI_API_KEY")
 
 # Define prompts for various tasks
-MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks."
+MEAL_PLAN_PROMPT = (
+    "Based on the following user preferences: dietary restrictions as"
+    " vegetarian, preferred cuisines as Italian and Indian, a total caloric"
+    " intake of around 2000 calories per day, and an exclusion of legumes,"
+    " create a detailed weekly meal plan. Include a variety of meals for"
+    " breakfast, lunch, dinner, and optional snacks."
+)
 IMAGE_ANALYSIS_PROMPT = (
-    "Identify the items in this fridge, including their quantities and condition."
+    "Identify the items in this fridge, including their quantities and"
+    " condition."
 )
 
 
@@ -45,15 +52,19 @@ def create_vision_agent(image_path):
                     {"type": "text", "text": IMAGE_ANALYSIS_PROMPT},
                     {
                         "type": "image_url",
-                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
+                        "image_url": {
+                            "url": f"data:image/jpeg;base64,{base64_image}"
+                        },
                     },
                 ],
             }
         ],
         "max_tokens": 300,
     }
     response = requests.post(
-        "https://api.openai.com/v1/chat/completions", headers=headers, json=payload
+        "https://api.openai.com/v1/chat/completions",
+        headers=headers,
+        json=payload,
     )
     return response.json()
 
@@ -65,10 +76,11 @@ def generate_integrated_shopping_list(
     # Prepare the prompt for the LLM
     fridge_contents = image_analysis["choices"][0]["message"]["content"]
     prompt = (
-        f"Based on this meal plan: {meal_plan_output}, "
-        f"and the following items in the fridge: {fridge_contents}, "
-        f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, "
-        f"generate a comprehensive shopping list that includes only the items needed."
+        f"Based on this meal plan: {meal_plan_output}, and the following items"
+        f" in the fridge: {fridge_contents}, considering dietary preferences as"
+        " vegetarian with a preference for Italian and Indian cuisines,"
+        " generate a comprehensive shopping list that includes only the items"
+        " needed."
     )
 
     # Send the prompt to the LLM and return the response
@@ -94,7 +106,9 @@
 }
 
 # Generate Meal Plan
-meal_plan_output = meal_plan_agent.run(f"Generate a meal plan: {user_preferences}")
+meal_plan_output = meal_plan_agent.run(
+    f"Generate a meal plan: {user_preferences}"
+)
 
 # Vision Agent - Analyze an Image
 image_analysis_output = create_vision_agent("full_fridge.jpg")
13 changes: 8 additions & 5 deletions playground/demos/positive_med/positive_med.py
@@ -39,9 +39,9 @@ def get_review_prompt(article):
 
 
 def social_media_prompt(article: str, goal: str = "Clicks and engagement"):
-    prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace("{{ARTICLE}}", article).replace(
-        "{{GOAL}}", goal
-    )
+    prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
+        "{{ARTICLE}}", article
+    ).replace("{{GOAL}}", goal)
     return prompt
@@ -50,7 +50,8 @@
     "Generate 10 topics on gaining mental clarity using ancient practices"
 )
 topics = llm(
-    f"Your System Instructions: {TOPIC_GENERATOR}, Your current task: {topic_selection_task}"
+    f"Your System Instructions: {TOPIC_GENERATOR}, Your current task:"
+    f" {topic_selection_task}"
 )
 
 dashboard = print(
@@ -109,7 +110,9 @@
 
 
 # Agent that publishes on social media
-distribution_agent = llm(social_media_prompt(draft_blog, goal="Clicks and engagement"))
+distribution_agent = llm(
+    social_media_prompt(draft_blog, goal="Clicks and engagement")
+)
 distribution_agent_out = print(
     colored(
         f"""
4 changes: 3 additions & 1 deletion playground/models/bioclip.py
@@ -1,6 +1,8 @@
 from swarms.models.bioclip import BioClip
 
-clip = BioClip("hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224")
+clip = BioClip(
+    "hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224"
+)
 
 labels = [
     "adenocarcinoma histopathology",
10 changes: 8 additions & 2 deletions playground/models/idefics.py
@@ -2,11 +2,17 @@
 
 model = idefics()
 
-user_input = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+user_input = (
+    "User: What is in this image?"
+    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+)
 response = model.chat(user_input)
 print(response)
 
-user_input = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
+user_input = (
+    "User: And who is that?"
+    " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
+)
 response = model.chat(user_input)
 print(response)
 
4 changes: 3 additions & 1 deletion playground/models/llama_function_caller.py
@@ -28,7 +28,9 @@ def get_weather(location: str, format: str) -> str:
 )
 
 # Call the function
-result = llama_caller.call_function("get_weather", location="Paris", format="Celsius")
+result = llama_caller.call_function(
+    "get_weather", location="Paris", format="Celsius"
+)
 print(result)
 
 # Stream a user prompt
3 changes: 2 additions & 1 deletion playground/models/vilt.py
@@ -3,5 +3,6 @@
 model = Vilt()
 
 output = model(
-    "What is this image", "http://images.cocodataset.org/val2017/000000039769.jpg"
+    "What is this image",
+    "http://images.cocodataset.org/val2017/000000039769.jpg",
 )
7 changes: 5 additions & 2 deletions playground/structs/flow_tools.py
@@ -30,7 +30,9 @@ async def async_load_playwright(url: str) -> str:
 
         text = soup.get_text()
         lines = (line.strip() for line in text.splitlines())
-        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
+        chunks = (
+            phrase.strip() for line in lines for phrase in line.split(" ")
+        )
         results = "\n".join(chunk for chunk in chunks if chunk)
     except Exception as e:
         results = f"Error: {e}"
@@ -58,5 +60,6 @@ def browse_web_page(url: str) -> str:
 )
 
 out = flow.run(
-    "Generate a 10,000 word blog on mental clarity and the benefits of meditation."
+    "Generate a 10,000 word blog on mental clarity and the benefits of"
+    " meditation."
 )
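Rewraps of this shape are normally produced by a formatter rather than by hand; the leading-space continuation fragments in particular match the output of Black's experimental string processing. A minimal sketch of such a configuration, assuming a pyproject.toml at the repository root (this file is not part of the commit shown):

    # pyproject.toml (hypothetical formatter configuration)
    [tool.black]
    line-length = 80   # Black's default is 88; 80 matches the commit message
    preview = true     # stock Black leaves long string literals unsplit

With that in place, running black . rewraps calls, signatures, and collections, and (with the preview flag) long strings, in the parenthesize-and-split style seen throughout this commit.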