from typing import Any, List, Mapping, Optional

import requests

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


class CustomLLM(LLM):
    """LLM wrapper that forwards prompts to a simple HTTP chat endpoint.

    The endpoint is expected to accept a GET request with the prompt in the
    ``q`` query parameter and return the completion as the response body.
    """

    # Declared as a pydantic field: LangChain's `LLM` base is a pydantic
    # model, so assigning `self.url` in a custom __init__ (as the original
    # did) raises "object has no field" at construction time.
    url: str

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization."""
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Send *prompt* to the endpoint and return the decoded response.

        Implementing ``_call`` (instead of overriding ``__call__``) keeps
        LangChain's callback and caching machinery intact; callers still
        invoke the instance as ``llm(prompt)``.

        Args:
            prompt: Text forwarded to the server in the ``q`` parameter.
            stop: Optional stop sequences; the response is truncated at the
                first occurrence of any of them.

        Returns:
            The server's response body, decoded as UTF-8.

        Raises:
            requests.HTTPError: If the server answers with an error status.
            requests.Timeout: If the server does not respond within 30s.
        """
        # Timeout prevents a hung server from blocking the caller forever.
        res = requests.get(self.url, params={"q": prompt}, timeout=30)
        # Surface HTTP errors instead of silently returning an error body
        # as if it were model output.
        res.raise_for_status()
        text = res.content.decode()
        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters that uniquely identify this LLM configuration."""
        return {"url": self.url}


# Pydantic models take keyword arguments; positional construction would fail.
llm = CustomLLM(url="http://127.0.0.1:5001/chat")