OpenAI_Asyncio
import asyncio
import json
import os
import random
from datetime import datetime

from openai import AsyncOpenAI

# Set up the client with a custom base URL
client = AsyncOpenAI(base_url="http://0.0.0.0:8000/v1/")
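# Note (assumption): the openai client still looks for an API key even when
# targeting a local server; if OPENAI_API_KEY is unset in your environment,
# pass a placeholder, e.g. AsyncOpenAI(base_url=..., api_key="not-needed").
# The <|python_tag|> handling below suggests a Llama-family model served
# behind an OpenAI-compatible endpoint (e.g., vLLM); adjust for your setup.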

async def get_current_temperature(location: str, unit: str = "fahrenheit") -> dict:
    """
    Simulate getting the current temperature for a given location.
    """
    # Simulate API call delay
    await asyncio.sleep(2)

    # Generate a random temperature between 0°C and 40°C
    temp_c = random.uniform(0, 40)

    # Convert to Fahrenheit if requested
    temp_f = (temp_c * 9 / 5) + 32

    temp = temp_f if unit.lower() == "fahrenheit" else temp_c

    return {
        "location": location,
        "temperature": round(temp, 1),
        "unit": unit,
        "timestamp": datetime.now().isoformat(),
    }
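# Usage sketch (hypothetical values): `await get_current_temperature("Louisville, KY")`
# returns something like {"location": "Louisville, KY", "temperature": 82.4,
# "unit": "fahrenheit", "timestamp": "2024-08-27T04:24:00"}.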

async def process_stream(stream):
    full_response = ""
    async for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            content = chunk.choices[0].delta.content
            print(content, end="", flush=True)
            full_response += content
    print()  # New line after full response
    return full_response
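# Caveat (assumption): some OpenAI-compatible servers emit stream chunks with
# an empty choices list (e.g., a trailing usage chunk). If process_stream hits
# an IndexError, guard the access with `if chunk.choices:` first.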

async def main():
    # Read system and tool prompts from files
    with open(os.path.expanduser("~/templates/system.txt"), "r") as f:
        system_prompt = f.read()
    with open(os.path.expanduser("~/templates/func.txt"), "r") as f:
        tool_prompt = f.read()

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "tool", "content": tool_prompt},
        {"role": "user", "content": "What's the temperature in Louisville, KY?"},
    ]
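    # Note: a bare {"role": "tool"} message without a tool_call_id is not valid
    # against the official OpenAI API; it works here only if the local server's
    # chat template accepts it as a slot for the tool definitions in func.txt.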

    while True:
        stream = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=4098,
            n=1,
            temperature=0.1,
            top_p=0.1,
            stream=True
        )

        full_response = await process_stream(stream)

        if full_response.startswith("<|python_tag|>"):
            try:
                function_call = json.loads(full_response.replace("<|python_tag|>", "").strip())
                result = await get_current_temperature(**function_call["parameters"])
                function_response = json.dumps(result)
                messages.append({"role": "assistant", "content": full_response})
                messages.append({"role": "function", "name": "get_current_temperature", "content": function_response})
                print(function_response)  # Print the function response

                # Follow-up completion
                messages.append({"role": "user", "content": "Can you summarize the temperature information you just provided?"})

                follow_up_stream = await client.chat.completions.create(
                    model="gpt-3.5-turbo",
                    messages=messages,
                    max_tokens=4098,
                    n=1,
                    temperature=0.1,
                    top_p=0.1,
                    stream=True
                )

                follow_up_response = await process_stream(follow_up_stream)
                messages.append({"role": "assistant", "content": follow_up_response})

            except json.JSONDecodeError:
                print("Error: Invalid JSON in function call")
                break
            except Exception as e:
                print(f"Error calling function: {str(e)}")
                break
        else:
            # If it's not a function call, we're done
            break

if __name__ == "__main__":
    asyncio.run(main())
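For reference, the loop above assumes the model emits a tool call in roughly this shape (the exact format depends on your func.txt template; the payload below is a hypothetical example, not captured server output):

<|python_tag|>{"name": "get_current_temperature", "parameters": {"location": "Louisville, KY", "unit": "fahrenheit"}}

Only the "parameters" key is actually read by the code, which unpacks it directly into get_current_temperature(**...).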