Skip to content

Instantly share code, notes, and snippets.

@maoxiaoke
Created April 5, 2025 13:02
Show Gist options
  • Save maoxiaoke/a0b91b7ebaebb529446d3404a1c8f4e3 to your computer and use it in GitHub Desktop.

Revisions

  1. maoxiaoke created this gist Apr 5, 2025.
    235 changes: 235 additions & 0 deletions index.js
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,235 @@
    /* eslint-disable no-inner-declarations */
    // Load environment variables, read configuration information from .env file
    require('dotenv').config();

    // Import necessary modules: express for creating web server, body-parser for parsing request body, axios for sending HTTP requests
    const express = require('express');
    const bodyParser = require('body-parser');
    const axios = require('axios');

    // Create Express application instance
    const app = express();
    // Define the port for the server to listen on, prioritize environment variable PORT, otherwise default to 3389
    // NOTE(review): `||` also treats PORT=0 / PORT='' as unset; use `??` if those should be honored
    const port = process.env.PORT || 3389;

    // Use middleware to parse JSON data in requests
    app.use(bodyParser.json());
    // Use middleware to parse URL-encoded format data, extended: true indicates using the built-in qs library to parse complex objects
    app.use(bodyParser.urlencoded({ extended: true }));

    // Define the base URL for OpenAI API
    const OPENAI_API_BASE_URL = 'https://api.openai.com';

    // Register a proxy middleware to intercept all incoming requests and forward them to OpenAI API
    // Proxy middleware: authenticates every incoming request, forwards it to the
    // OpenAI API, streams the upstream response straight back to the client, and
    // in parallel parses the SSE stream to collect the assistant's delta content
    // for server-side logging.
    app.use(async (req, res) => {
      try {
        // --- Authentication --------------------------------------------------
        // Non-OPTIONS requests must present the shared proxy token.
        // NOTE(review): the token is hard-coded; consider moving it to an env var.
        const authHeader = req.headers.authorization;
        if (authHeader !== 'Bearer asdf' && req.method !== 'OPTIONS') {
          console.error('Authorization failed: Invalid Authorization header');
          // Fix: a failed auth check is a client error — report 401, not 500.
          return res.status(401).json({ error: 'Authorization failed', message: 'Invalid Authorization header' });
        }

        // --- Request logging -------------------------------------------------
        const { path } = req;
        const requestBody = req.body;

        console.log('Request path:', path);
        console.log('Request method:', req.method);
        try {
          // Only log the body when it exists and survives a round-trip through
          // JSON (guards against circular references in exotic payloads).
          if (requestBody && Object.keys(requestBody).length > 0) {
            const safeBody = JSON.parse(JSON.stringify(requestBody));
            console.log('Request body:', safeBody);
          }
        } catch (err) {
          console.log('Request body: [Cannot safely stringify body]');
        }

        // --- CORS ------------------------------------------------------------
        res.setHeader('Access-Control-Allow-Origin', '*');
        res.setHeader('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
        res.setHeader('Access-Control-Allow-Credentials', 'true');
        res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');

        // Preflight requests get an empty 204 and never reach the upstream API.
        if (req.method === 'OPTIONS') {
          return res.sendStatus(204);
        }

        // --- Build the upstream request --------------------------------------
        // - url: OpenAI base URL + the incoming path (path is forwarded as-is)
        // - headers: real API key from the environment replaces the proxy token
        // - data/params: body for non-GET, query string for GET
        // - responseType 'stream': we want the raw SSE byte stream
        const openaiRequestConfig = {
          method: req.method,
          url: `${OPENAI_API_BASE_URL}${path}`,
          headers: {
            Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
            'Content-Type': 'application/json',
          },
          data: req.method !== 'GET' ? requestBody : undefined,
          params: req.method === 'GET' ? req.query : undefined,
          responseType: 'stream',
        };

        console.log('Forwarding request to:', openaiRequestConfig.url);

        // Send the request; the response body arrives as a readable stream.
        const openaiResponse = await axios(openaiRequestConfig);

        // Mirror upstream response headers so the client sees original metadata.
        res.set(openaiResponse.headers);

        // Accumulates all `choices[].delta.content` fragments for the end-of-stream log.
        let collectedText = '';
        // Holds bytes of an SSE message that was split across chunk boundaries.
        let dataBuffer = '';

        // Extract complete `data:`-prefixed SSE messages from dataBuffer,
        // leaving any trailing partial message in the buffer for the next chunk.
        // (const arrows, defined before use — avoids inner function declarations.)
        const processBuffer = () => {
          if (!dataBuffer.includes('data:')) {
            return [];
          }

          const completeMessages = [];
          const parts = dataBuffer.split('data:');

          // Count how many characters of the buffer have been consumed so the
          // remainder (the possibly-incomplete last part) can be kept.
          let processedLength = 0;

          // Every part except the last is known-complete.
          for (let i = 0; i < parts.length - 1; i++) {
            const part = parts[i];

            // The leading empty part just means the buffer started with "data:".
            if (!part && i === 0) {
              processedLength += 'data:'.length;
              continue;
            }

            // Account for this part plus the "data:" delimiter that preceded it.
            processedLength += part.length + (i > 0 ? 'data:'.length : 0);

            // Fix: compare against the trimmed sentinel. The original compared
            // a trimmed string to ' [DONE]' (leading space), which never matched.
            if (part.trim() === '[DONE]') {
              continue;
            }

            if (part.trim()) {
              completeMessages.push(part.trim());
            }
          }

          // Drop the consumed prefix; keep only unprocessed bytes.
          dataBuffer = dataBuffer.substring(processedLength);

          return completeMessages;
        };

        // Parse each complete message and append any delta content to collectedText.
        const processMessages = (messages) => {
          messages.forEach((message) => {
            try {
              // Non-JSON payloads (comments, sentinels) are silently skipped.
              if (!message.startsWith('{')) {
                return;
              }

              const parsed = JSON.parse(message);
              if (parsed.choices && Array.isArray(parsed.choices)) {
                parsed.choices.forEach((choice) => {
                  if (choice.delta && choice.delta.content) {
                    collectedText += choice.delta.content;
                  }
                });
              }
            } catch (err) {
              console.log('Error processing message:', message);
              console.error('JSON parsing error:', err.message);

              // A JSON string cut mid-escape means the message is incomplete;
              // push it back (with its prefix) to be retried on the next chunk.
              if (err.message.includes('Unterminated string')) {
                dataBuffer = `data:${message}${dataBuffer}`;
              }
            }
          });
        };

        // Tap the stream for logging; piping below is unaffected.
        openaiResponse.data.on('data', (chunk) => {
          dataBuffer += chunk.toString();
          const completeMessages = processBuffer();
          if (completeMessages.length > 0) {
            processMessages(completeMessages);
          }
        });

        // Stream upstream bytes directly to the client (true streaming — the
        // client starts receiving data immediately).
        openaiResponse.data.pipe(res);

        // When the upstream stream ends, log the status and the full assembled text.
        openaiResponse.data.on('end', () => {
          console.log('Response status code:', openaiResponse.status);
          console.log('Complete concatenated content:', collectedText);

          if (dataBuffer.trim()) {
            console.log('Unprocessed data remained in buffer:', dataBuffer);
          }
        });
      } catch (error) {
        console.error('Proxy request error:', error.message);

        // If the upstream answered with an error, forward its status and body.
        if (error.response) {
          try {
            // Round-trip through JSON to guard against circular references.
            const safeErrorData = error.response.data && typeof error.response.data === 'object'
              ? JSON.parse(JSON.stringify(error.response.data))
              : { error: 'OpenAI API error', message: 'Error response could not be serialized' };

            res.status(error.response.status).json(safeErrorData);
            console.log('OpenAI error response:', safeErrorData);
          } catch (jsonError) {
            console.error('Error serializing OpenAI error response:', jsonError.message);
            res.status(error.response.status).json({
              error: 'OpenAI API error',
              message: 'Error details could not be serialized'
            });
          }
        } else {
          // No upstream response at all (network failure, etc.) — plain 500.
          res.status(500).json({
            error: 'Proxy server error',
            message: error.message
          });
        }
      }
    });

    // Start server, listen on specified port, and output startup log
    // Start the server on the configured port and log usage hints.
    app.listen(port, () => {
      console.log(`OpenAI API proxy server running at http://localhost:${port}`);
      console.log('Usage: Send OpenAI API requests to this proxy server, keeping the path unchanged');
      // Fix: the original used single quotes here, so the literal text "${port}"
      // was printed; a template literal interpolates the actual port number.
      console.log(`Example: POST http://localhost:${port}/chat/completions`);
    });