#!/usr/bin/env bun
/**
 * Bug Reproduction: Maple AI models don't return OpenAI-style tool calls
 *
 * Testing:  gpt-oss-120b (the same model supports tool calling on Cerebras)
 * Expected: Model returns tool_calls in the response
 * Actual:   Model returns an empty response or plain text instead of tool calls
 *
 * Setup:
 *   bun add ai @ai-sdk/openai-compatible
 *   export MAPLE_API_KEY="your-key"
 *   bun run maple-tool-calling-bug-repro.ts
 */
import { jsonSchema, streamText, tool } from "ai"
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
import { createMapleFetch } from "./src/maple"

const MAPLE_API_KEY = process.env.MAPLE_API_KEY || ""
if (!MAPLE_API_KEY) {
  console.error("āŒ Missing MAPLE_API_KEY environment variable")
  console.error("   Get one at: https://trymaple.ai")
  process.exit(1)
}

console.log("šŸ› Maple AI Tool Calling Bug Reproduction\n")
console.log("šŸ“‹ Test Setup:")
console.log("   Model: gpt-oss-120b")
console.log("   API: https://enclave.trymaple.ai")
console.log("   Task: Write 'Hello World' to /tmp/test.txt")
console.log("   Expected: Model calls write() tool")
console.log("   Actual: ???\n")

// Create Maple's encrypted fetch
const mapleFetch = await createMapleFetch({
  apiUrl: "https://enclave.trymaple.ai",
  apiKey: MAPLE_API_KEY,
  // Allowed enclave PCR0 attestation measurements
  pcr0Values: [
    "79e7bd1e7df09fdb5b7098956a2268c278cc88be323c11975e2a2d080d65f30f8e0efe690edd450493c833b46f40ae1a",
    "ed9109c16f30a470cf0ea2251816789b4ffa510c990118323ce94a2364b9bf05bdb8777959cbac86f5cabc4852e0da71",
    "4f2bcdf16c38842e1a45defd944d24ea58bb5bcb76491843223022acfe9eb6f1ff79b2cb9a6b2a9219daf9c7bf40fa37",
    "b8ee4b511ef2c9c6ab3e5c0840c5df2218fbb4d9df88254ece7af9462677e55aa5a03838f3ae432d86ca1cb6f992eee7",
  ],
})

// Create provider with Maple's fetch
const maple = createOpenAICompatible({
  name: "maple",
  baseURL: "https://enclave.trymaple.ai/v1",
  fetch: mapleFetch,
})

// Define the tool with the AI SDK helpers; jsonSchema() lets streamText
// consume a plain JSON Schema object without a zod dependency.
const tools = {
  write: tool({
    description: "Write content to a file on disk",
    parameters: jsonSchema<{ path: string; content: string }>({
      type: "object",
      properties: {
        path: {
          type: "string",
          description: "The file path to write to",
        },
        content: {
          type: "string",
          description: "The content to write to the file",
        },
      },
      required: ["path", "content"],
    }),
    execute: async ({ path, content }) => {
      try {
        await Bun.write(path, content)
        return `Written to ${path}`
      } catch (err: any) {
        return `Error: ${err.message}`
      }
    },
  }),
}

try {
  console.log("šŸ“¤ Sending request with tool definitions...\n")

  const result = streamText({
    model: maple("gpt-oss-120b"),
    messages: [
      {
        role: "user",
        content: "Write 'Hello World from Maple!' to /tmp/maple-test.txt",
      },
    ],
    tools,
    maxSteps: 3,
  })

  let hasToolCalls = false
  let hasText = false
  let textContent = ""
  const toolCallsFound: string[] = []

  // Inspect the full stream to see whether the model emits tool calls or plain text
  for await (const chunk of result.fullStream) {
    if (chunk.type === "tool-call") {
      hasToolCalls = true
      toolCallsFound.push(chunk.toolName)
      console.log(`āœ… Tool call: ${chunk.toolName}`)
      console.log(`   Args: ${JSON.stringify(chunk.args)}`)
    } else if (chunk.type === "text-delta") {
      hasText = true
      const delta = String(chunk.textDelta || "")
      textContent += delta
      process.stdout.write(delta)
    } else if (chunk.type === "finish") {
      console.log("\n\nšŸ“Š Result:")
      console.log(`   Tool calls: ${hasToolCalls ? "āœ… YES" : "āŒ NO"}`)
      console.log(`   Text output: ${hasText ? "YES" : "NO"}`)
      console.log(`   Text length: ${textContent.length} chars`)
      if (toolCallsFound.length > 0) {
        console.log(`   Tools called: ${toolCallsFound.join(", ")}`)
      }
      if (hasText) {
        const sample = textContent.trim().substring(0, 200)
        console.log(`   Text content: "${sample}${sample.length < textContent.trim().length ? "..." : ""}"`)
      }

      console.log("\nšŸ” Diagnosis:")
      if (hasToolCalls) {
        console.log("   āœ… SUCCESS: Model correctly used tool calling!")
      } else if (hasText) {
        console.log("   āŒ BUG: Model returned text instead of calling tools")
        console.log("   Expected: model.tool_calls = [{ name: 'write', args: {...} }]")
        console.log("   Actual: model.content = text explaining what to do")
      } else {
        console.log("   āŒ BUG: Model returned empty response")
        console.log("   Expected: model.tool_calls = [{ name: 'write', args: {...} }]")
        console.log("   Actual: No tool calls, no text")
      }
    }
  }

  console.log("\nšŸ“ Notes:")
  console.log("   • This same model (gpt-oss-120b) DOES support tool calling on Cerebras")
  console.log("   • See: https://inference-docs.cerebras.ai/models/openai-oss")
  console.log("   • OpenCode Zen models (grok-code, etc.) work fine for tool calling")
  console.log("   • All Maple models tested show this issue (qwen, llama, deepseek, gpt-oss)")
} catch (error: any) {
  console.error("\nāŒ Error:", error.message)
  if (error.cause) {
    console.error("   Cause:", error.cause)
  }
  if (error.stack) {
    console.error("\n   Stack trace:")
    console.error(error.stack)
  }
  process.exit(1)
}