#compdef sgpt
# zsh completion for sgpt (shell_gpt).
#
# Installation: name this file `_sgpt` and place it in a directory on your
# $fpath (run `echo $fpath` to list it). For a per-user install, add
# `fpath=($HOME/.config/shell_gpt $fpath)` to your ~/.zshrc. For a
# system-wide install:
#   wget -O /usr/share/zsh/site-functions/_sgpt https://gist.github.com/obeone/dc66f2ca40b8254edab61ac50cdec0f3/raw/_sgpt.zsh
#
# Authors:
#   - ChatGPT [ZSH Expert](https://chatgpt.com/g/g-XczdbjXSW-zsh-expert)
#   - a little bit [obeone](https://github.com/obeone)

local sgptrc="${HOME}/.config/shell_gpt/.sgptrc"

# Source sgpt configuration if it exists (may define OPENAI_API_KEY,
# OPENAI_BASE_URL, USE_LITELLM, ... used by the helpers below).
[[ -f "$sgptrc" ]] && source "$sgptrc"

# _sgpt_cache_fresh FILE MAX_AGE_SECONDS
# Returns 0 when FILE exists and its mtime is younger than MAX_AGE_SECONDS.
# Tries GNU stat (`-c %Y`) first, then falls back to BSD/macOS stat (`-f %m`).
function _sgpt_cache_fresh {
  local file="$1" max_age="$2" mtime
  [[ -f "$file" ]] || return 1
  mtime=$(stat -c "%Y" "$file" 2>/dev/null || stat -f "%m" "$file" 2>/dev/null) || return 1
  (( $(date +%s) - mtime < max_age ))
}

# Complete chat session ids, cached for 12 hours.
function _sgpt_chats {
  local -a chats
  local cache_file="${HOME}/.cache/sgpt_chats"

  if _sgpt_cache_fresh "$cache_file" 43200; then
    # ${(@f)...} splits the cache file content on newlines into an array.
    chats=("${(@f)$(<"$cache_file")}")
  else
    # Fetch chats dynamically (sgpt prints full paths; keep basenames only)
    # and refresh the cache.
    chats=("${(@f)$(sgpt --list-chats | xargs -n 1 basename)}")
    mkdir -p "${cache_file:h}"  # ensure ~/.cache exists before writing
    print -l $chats > "$cache_file"
  fi
  _describe -t chats 'available chats' chats
}

# Complete model names from the OpenAI-compatible API and (optionally) Ollama.
function _sgpt_models {
  local -a models
  # Default to the official OpenAI endpoint when OPENAI_BASE_URL is unset,
  # so the curl URL never degenerates to a bare "/models".
  local base_url="${OPENAI_BASE_URL:-https://api.openai.com/v1}"

  # Fetch OpenAI models if an API key is available; exclude non-chat models
  # (image, embedding, audio) via the jq blacklist filter.
  if [[ -n "$OPENAI_API_KEY" ]]; then
    models+=("${(@f)$(curl -s "$base_url/models" \
      -H "Authorization: Bearer $OPENAI_API_KEY" \
      | jq -r '.data[].id | select(test("dall-e.*|text-embedding.*|tts.*|whisper.*") | not)')}")
  fi

  # Fetch Ollama models directly every time (no cache). Take the NAME column,
  # prefix with "ollama/", and escape colons so _describe does not treat them
  # as candidate/description separators.
  if [[ "$USE_LITELLM" == "true" ]]; then
    models+=("${(@f)$(ollama list | awk 'NR > 1 {print "ollama/"$1}' | sed 's/:/\\:/g')}")
  fi

  _describe -t models 'available models' models
}

# Complete role names, cached for 24 hours.
function _sgpt_roles {
  local -a roles
  local cache_file="${HOME}/.cache/sgpt_roles"

  if _sgpt_cache_fresh "$cache_file" 86400; then
    roles=("${(@f)$(<"$cache_file")}")
  else
    # sgpt prints role file paths; keep the last path component sans ".json".
    roles=("${(@f)$(sgpt --list-roles | awk -F/ '{print $NF}' | sed 's/\.json$//')}")
    mkdir -p "${cache_file:h}"  # ensure ~/.cache exists before writing
    print -l $roles > "$cache_file"
  fi
  _describe -t roles 'available roles' roles
}

# Drop the roles cache (used after --create-role) and re-list roles.
function _invalidate_roles_cache {
  local cache_file="${HOME}/.cache/sgpt_roles"
  rm -f "$cache_file"
  _sgpt_roles  # Refresh the roles list
}

_arguments -s \
  '1:prompt:_files' \
  '--model[specify the model to use]:model:_sgpt_models' \
  '--temperature[randomness of generated output]:temperature (range 0.0 to 2.0)' \
  '--top-p[limits highest probable tokens]:top-p (range 0.0 to 1.0)' \
  '--md[prettify markdown output]::' \
  '--no-md[disable prettify markdown output]' \
  '--editor[open $EDITOR to provide a prompt]::' \
  '--no-editor[do not open $EDITOR]' \
  '--cache[cache completion results]::' \
  '--no-cache[do not cache completion results]' \
  '--version[show version]' \
  '--help[show help]' \
  '--shell[generate and execute shell commands]' \
  '-s[alias for --shell]' \
  '--no-interaction[disable interaction for --shell]' \
  '--interaction[enable interaction for --shell]' \
  '--describe-shell[describe a shell command]::' \
  '--code[generate only code]' \
  '--no-functions[disable function calls]' \
  '--functions[enable function calls]' \
  '--chat[follow conversation with id, use "temp" for quick session]:chat id:_sgpt_chats' \
  '--repl[start a REPL session]:session id:_sgpt_chats' \
  '--show-chat[show all messages from provided chat id]:chat id:_sgpt_chats' \
  '--list-chats[list all existing chat ids]' \
  '--role[set system role for GPT model]:role:_sgpt_roles' \
  '--create-role[create role]:role name:_invalidate_roles_cache' \
  '--show-role[show role]:role name:_sgpt_roles' \
  '--list-roles[list roles]'