#!/usr/bin/env python
import os
import platform
import sys

from openai import OpenAI
from openai.types.chat.chat_completion_chunk import Choice

# Read the API key from the OPENAI_KEY environment variable (empty string if unset).
api_key = os.getenv("OPENAI_KEY", "")
client = OpenAI(api_key=api_key)
default_model = "gpt-4o-mini"


def print_completion(prompt: str) -> None:
    """Stream a chat completion for `prompt` and print it as it arrives."""
    streamed_completion = client.chat.completions.create(
        stream=True,
        model=default_model,
        messages=[
            {
                "role": "system",
                "content": f"you are a coding assistant running on {platform.system()}",
            },
            {"role": "user", "content": prompt},
        ],
    )
    for chunk in streamed_completion:
        # Each streamed chunk carries a single choice; unpack it.
        [choice] = chunk.choices
        if not isinstance(choice, Choice):
            raise ValueError("could not process first choice within chunk")
        # The final chunk's delta has no content, so skip printing None.
        content_delta = choice.delta.content
        if content_delta is not None:
            print(content_delta, end="", flush=True)
    print("\n")


if __name__ == "__main__":
    # Treat the command-line arguments as separate words of a single query.
    _, *words = sys.argv
    query = " ".join(words)
    try:
        print_completion(query)
    except Exception as e:
        print(f"could not complete because {e}")