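# kwaak.toml, variant 1: Groq-hosted chat models for indexing and querying,
# with embeddings computed locally through FastEmbed.
# Values prefixed with "env:" are read from environment variables.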
language = "rust"
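# Tavily API key for the agent's web-search tool.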
tavily_api_key = "env:TAVILY_API_KEY"
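# Run agent tool calls inside a Docker container rather than on the host.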
tool_executor = "docker"
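
# Shell commands kwaak can run inside the executor.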
[commands]
test = "cargo test --no-fail-fast --color=never"
coverage = "cargo tarpaulin --skip-clean --target-dir target/debug --engine llvm"
lint_and_fix = "cargo clippy --fix --allow-dirty --allow-staged && cargo fmt"
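
# Repository settings for kwaak's GitHub integration (branches, pull requests).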
[github]
owner = "shamb0"
repository = "kwaak-demo01-code-intel"
main_branch = "master"
token = "env:GITHUB_TOKEN"
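
# Model used while indexing the codebase.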
[llm.indexing]
provider = "Groq"
api_key = "env:KWAAK_GROQ_API_KEY"
prompt_model = "llama-3.3-70b-versatile"
base_url = "https://api.groq.com/openai/v1"
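
# Model used to answer queries; this preview model is tuned for tool use.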
[llm.query]
provider = "Groq"
api_key = "env:KWAAK_GROQ_API_KEY"
prompt_model = "llama3-groq-70b-8192-tool-use-preview"
base_url = "https://api.groq.com/openai/v1"
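
# Dense embeddings computed locally with FastEmbed's ONNX MiniLM model.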
[llm.embedding]
provider = "FastEmbed"
embedding_type = "dense"
embedding_model = "all-MiniLM-L6-v2-onnx"
batch_size = 16
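
# Dockerfile used to build the tool-executor container.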
[docker]
dockerfile = "Dockerfile"
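
# kwaak.toml, variant 2: a fully local setup serving models through Ollama.
# Identical to variant 1 except for the [llm.indexing] and [llm.query] sections.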
language = "rust"
tavily_api_key = "env:TAVILY_API_KEY"
tool_executor = "docker"

[commands]
test = "cargo test --no-fail-fast --color=never"
coverage = "cargo tarpaulin --skip-clean --target-dir target/debug --engine llvm"
lint_and_fix = "cargo clippy --fix --allow-dirty --allow-staged && cargo fmt"

[github]
owner = "shamb0"
repository = "kwaak-demo01-code-intel"
main_branch = "master"
token = "env:GITHUB_TOKEN"
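
# Indexing uses a local coder model via Ollama's OpenAI-compatible endpoint.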
[llm.indexing]
provider = "Ollama"
api_key = "env:KWAAK_OLLAMA_API_KEY"
prompt_model = "qwen2.5-coder:14b"
base_url = "http://localhost:11434/v1"
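
# Queries go to a smaller general-purpose model on the same local server.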
[llm.query]
provider = "Ollama"
api_key = "env:KWAAK_OLLAMA_API_KEY"
prompt_model = "llama3.2:latest"
base_url = "http://localhost:11434/v1"

[llm.embedding]
provider = "FastEmbed"
embedding_type = "dense"
embedding_model = "all-MiniLM-L6-v2-onnx"
batch_size = 16

[docker]
dockerfile = "Dockerfile"