kai-service fmt improvements (#15)
Signed-off-by: Jason Montleon <jmontleo@redhat.com>
jmontleon authored Feb 2, 2024
1 parent 2c01c58 commit 969ce29
Showing 3 changed files with 73 additions and 44 deletions.
4 changes: 4 additions & 0 deletions kai-service/README.md
@@ -3,6 +3,7 @@
This utility provides a service to generate AI prompts based on a solved example and source that requires updating. It also proxies requests to LLMs, and will likely include additional functionality as required.

## Usage

To deploy in cluster:

oc create configmap kai-conf --from-file kai.conf
@@ -12,7 +13,9 @@ oc patch deploy/kai-service --patch '{"spec":{"template":{"metadata":{"labels":{
oc create route edge kai-service --service kai-service --insecure-policy Redirect

## Brief Examples

### Generate a Prompt

```
curl -k 'https://kai-service-konveyor-tackle.apps.example.com/generate_prompt' -X POST -H "Content-Type: application/json" -d '{ "issue_description": "my bad description",
"language": "java-python-go-whatever",
@@ -23,6 +26,7 @@ curl -k 'https://kai-service-konveyor-tackle.apps.example.com/generate_prompt' -
```

### Proxy a Request

```
export OPENAI_API_KEY=replace-with-your-key
curl -k 'https://kai-service-konveyor-tackle.apps.example.com/proxy?upstream_url=https://api.openai.com/v1/chat/completions' \
```
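Note: the `kai.conf` mounted by the deploy steps above is not shown in this commit. Judging from `load_templates()` in `kai.py` and the expected output in `tests/kai_tests.py`, it is a YAML file whose `model_templates` map holds named Python format strings. A minimal sketch, with the `gpt` template text inferred from the test fixture rather than copied from a real config:

```
model_templates:
  gpt: "My Prompt {issue_description} {language} {example_original_code} {example_solved_code} {current_original_code}"
```

The proxy example above is cut off in this view. Since `proxy_handler` forwards only the request body plus the `Authorization` and `Content-Type` headers, the rest of the command presumably adds `-H "Authorization: Bearer $OPENAI_API_KEY"`, a JSON content type, and a chat-completions payload.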
86 changes: 51 additions & 35 deletions kai-service/kai.py
@@ -4,16 +4,17 @@

import os
import warnings

import aiohttp
import yaml

from aiohttp import web


def load_config():
    """Load the configuration from a yaml conf file."""
    config = "/usr/local/etc/kai.conf"
-    if os.environ.get('KAI_CONFIG'):
-        config = os.environ.get('KAI_CONFIG')
+    if os.environ.get("KAI_CONFIG"):
+        config = os.environ.get("KAI_CONFIG")

    with open(config, "r", encoding="utf-8") as stream:
        try:
@@ -22,9 +23,11 @@ def load_config():
            print(exc)
    return None


def load_templates():
    """Get model templates from the loaded configuration."""
-    return load_config()['model_templates']
+    return load_config()["model_templates"]


def load_template(model_name):
    """Loads the requested template."""
@@ -35,52 +38,62 @@ def load_template(model_name):
    warnings.warn("Warning: Model not found, using default (first) model from kai.conf")
    return list(model_templates.items())[0][1]


async def generate_prompt(request):
    """Generates a prompt based on input using the specified template."""
    try:
        data = await request.json()

-        language = data.get('language', '')
-        issue_description = data.get('issue_description', '')
-        example_original_code = data.get('example_original_code', '')
-        example_solved_code = data.get('example_solved_code', '')
-        current_original_code = data.get('current_original_code', '')
-        model_template = data.get('model_template', '')
-
-        if model_template == '':
-            warnings.warn("Model template not specified. For best results specify a model template.")
-
-        response = load_template(model_template).format(language=language,
-                                                        issue_description=issue_description,
-                                                        example_original_code=example_original_code,
-                                                        example_solved_code=example_solved_code,
-                                                        current_original_code=current_original_code,
-                                                        model_template=model_template)
+        language = data.get("language", "")
+        issue_description = data.get("issue_description", "")
+        example_original_code = data.get("example_original_code", "")
+        example_solved_code = data.get("example_solved_code", "")
+        current_original_code = data.get("current_original_code", "")
+        model_template = data.get("model_template", "")
+
+        if model_template == "":
+            warnings.warn(
+                "Model template not specified. For best results specify a model template."
+            )
+
+        response = load_template(model_template).format(
+            language=language,
+            issue_description=issue_description,
+            example_original_code=example_original_code,
+            example_solved_code=example_solved_code,
+            current_original_code=current_original_code,
+            model_template=model_template,
+        )

        warnings.resetwarnings()
-        return web.json_response({'generated_prompt': response})
+        return web.json_response({"generated_prompt": response})
    except Exception as e:
-        return web.json_response({'error': str(e)}, status=400)
+        return web.json_response({"error": str(e)}, status=400)


async def proxy_handler(request):
    """Proxies a streaming request to an LLM."""
-    upstream_url = request.query.get('upstream_url')
+    upstream_url = request.query.get("upstream_url")

    if not upstream_url:
-        return web.Response(status=400, text="Missing 'upstream_url' parameter in the request")
+        return web.Response(
+            status=400, text="Missing 'upstream_url' parameter in the request"
+        )

    headers = {}
-    if request.headers.get('Authorization'):
-        headers.update({ 'Authorization': request.headers.get('Authorization') })
-    if request.headers.get('Content-Type'):
-        headers.update({ 'Content-Type': request.headers.get('Content-Type') })
+    if request.headers.get("Authorization"):
+        headers.update({"Authorization": request.headers.get("Authorization")})
+    if request.headers.get("Content-Type"):
+        headers.update({"Content-Type": request.headers.get("Content-Type")})
    method = request.method
    data = await request.read()

    async with aiohttp.ClientSession() as session:
        try:
-            async with session.request(method, upstream_url, headers=headers, data=data) as upstream_response:
-                if 'chunked' in upstream_response.headers.get('Transfer-Encoding', ''):
+            async with session.request(
+                method, upstream_url, headers=headers, data=data
+            ) as upstream_response:
+                if "chunked" in upstream_response.headers.get("Transfer-Encoding", ""):
                    response = web.StreamResponse()
                    await response.prepare(request)

@@ -93,14 +106,17 @@ async def proxy_handler(request):
                    return web.Response(
                        status=upstream_response.status,
                        text=await upstream_response.text(),
-                        headers=upstream_response.headers
+                        headers=upstream_response.headers,
                    )
        except aiohttp.ClientError as e:
-            return web.Response(status=500, text=f"Error connecting to upstream service: {str(e)}")
+            return web.Response(
+                status=500, text=f"Error connecting to upstream service: {str(e)}"
+            )


app = web.Application()
-app.router.add_post('/generate_prompt', generate_prompt)
-app.router.add_route('*', '/proxy', proxy_handler)
+app.router.add_post("/generate_prompt", generate_prompt)
+app.router.add_route("*", "/proxy", proxy_handler)

-if __name__ == '__main__':
+if __name__ == "__main__":
    web.run_app(app)
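The reformatted module keeps the same two routes. A minimal client sketch for `/generate_prompt` follows; the base URL is a placeholder (`web.run_app` listens on port 8080 by default) and the payload keys mirror the `data.get(...)` calls above:

```
import asyncio

import aiohttp

BASE_URL = "http://localhost:8080"  # placeholder; web.run_app defaults to port 8080


async def main():
    # Keys mirror the fields generate_prompt() reads from the request JSON.
    payload = {
        "language": "go",
        "issue_description": "description",
        "example_original_code": "my original code",
        "example_solved_code": "my solved example",
        "current_original_code": "my current issue code",
        "model_template": "gpt",
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{BASE_URL}/generate_prompt", json=payload) as resp:
            # Success yields {"generated_prompt": ...}; bad input yields
            # {"error": ...} with status 400.
            body = await resp.json()
            print(resp.status, body.get("generated_prompt") or body.get("error"))


if __name__ == "__main__":
    asyncio.run(main())
```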
27 changes: 18 additions & 9 deletions kai-service/tests/kai_tests.py
@@ -1,33 +1,42 @@
""" Test for kai.py."""

import os
-import pytest
+
+import pytest
from aiohttp import web

import kai

-os.environ['KAI_CONFIG'] = "tests/kai_tests.conf"
+os.environ["KAI_CONFIG"] = "tests/kai_tests.conf"


@pytest.fixture
def cli(event_loop, aiohttp_client):
    """Start server to listen for test connections."""
    app = web.Application()
-    app.router.add_post('/generate_prompt', kai.generate_prompt)
-    app.router.add_route('*', '/proxy', kai.proxy_handler)
+    app.router.add_post("/generate_prompt", kai.generate_prompt)
+    app.router.add_route("*", "/proxy", kai.proxy_handler)
    return event_loop.run_until_complete(aiohttp_client(app))


@pytest.mark.asyncio
async def test_set_value(cli):
    """Test a template renders properly."""
-    resp = await cli.post('/generate_prompt', headers={"Content-Type": "application/json"},
-                          data=b"""{"issue_description": "description",
+    resp = await cli.post(
+        "/generate_prompt",
+        headers={"Content-Type": "application/json"},
+        data=b"""{"issue_description": "description",
        "language": "go",
        "example_original_code": "my original code",
        "example_solved_code": "my solved example",
        "current_original_code": "my current issue code",
-        "model_template": "gpt"}""")
+        "model_template": "gpt"}""",
+    )

    print(await resp.read())
-    print(os.environ.get('KAI_CONFIG'))
+    print(os.environ.get("KAI_CONFIG"))
    assert resp.status == 200
-    assert await resp.text() == '{"generated_prompt": "My Prompt description go my original code my solved example my current issue code"}'
+    assert (
+        await resp.text()
+        == '{"generated_prompt": "My Prompt description go my original code my solved example my current issue code"}'
+    )
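Note: `kai_tests.py` points `KAI_CONFIG` at the relative path `tests/kai_tests.conf`, so the suite is presumably run from the `kai-service` directory (for example, `pytest tests/kai_tests.py`). The `aiohttp_client` fixture comes from aiohttp's pytest plugin, and the `asyncio` marker requires pytest-asyncio.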
