diff --git a/examples/async_demo.py b/examples/async_demo.py
index 793b4e43fb..92c267c38f 100755
--- a/examples/async_demo.py
+++ b/examples/async_demo.py
@@ -10,7 +10,7 @@
 
 async def main() -> None:
     stream = await client.completions.create(
-        model="gpt-3.5-turbo-instruct",
+        model="text-davinci-003",
         prompt="Say this is a test",
         stream=True,
     )
diff --git a/examples/streaming.py b/examples/streaming.py
index 368fa5f911..168877dfc5 100755
--- a/examples/streaming.py
+++ b/examples/streaming.py
@@ -13,7 +13,7 @@
 def sync_main() -> None:
     client = OpenAI()
     response = client.completions.create(
-        model="gpt-3.5-turbo-instruct",
+        model="text-davinci-002",
         prompt="1,2,3,",
         max_tokens=5,
         temperature=0,
@@ -33,7 +33,7 @@ def sync_main() -> None:
 async def async_main() -> None:
     client = AsyncOpenAI()
     response = await client.completions.create(
-        model="gpt-3.5-turbo-instruct",
+        model="text-davinci-002",
         prompt="1,2,3,",
         max_tokens=5,
         temperature=0,
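
For context, here is a minimal sketch of what the two touched examples do end to end: both stream a legacy completion and print each chunk's text as it arrives, using the sync and async v1 clients. This is a condensed illustration, not part of the diff; it assumes an `OPENAI_API_KEY` environment variable, and the function names `sync_demo`/`async_demo` are illustrative. Model names mirror the values used in the hunks above.

```python
import asyncio

from openai import OpenAI, AsyncOpenAI


def sync_demo() -> None:
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    response = client.completions.create(
        model="text-davinci-002",
        prompt="1,2,3,",
        max_tokens=5,
        temperature=0,
        stream=True,
    )
    # With stream=True the call returns an iterator of completion chunks.
    for chunk in response:
        print(chunk.choices[0].text, end="")
    print()


async def async_demo() -> None:
    client = AsyncOpenAI()
    stream = await client.completions.create(
        model="text-davinci-003",
        prompt="Say this is a test",
        stream=True,
    )
    # The async client yields the same chunks via async iteration.
    async for chunk in stream:
        print(chunk.choices[0].text, end="")
    print()


if __name__ == "__main__":
    sync_demo()
    asyncio.run(async_demo())
```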