from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain.tools.render import render_text_description
from langchain_community.llms.bedrock import Bedrock
from langchain_community.chat_models.bedrock import BedrockChat
import dotenv

# Load SERPAPI_API_KEY (and any other secrets) from a local .env file.
dotenv.load_dotenv()

# Plain-text completion model, used only by the llm-math tool below.
# credentials_profile_name refers to a named profile in ~/.aws/credentials
# (here the profile happens to be called 'us-east-1'); pass region_name
# if you need to set the AWS region explicitly.
llm = Bedrock(
    credentials_profile_name='us-east-1',
    model_id='anthropic.claude-v2:1',
)

# Chat model that drives the agent itself.
chat_model = BedrockChat(
    credentials_profile_name='us-east-1',
    model_id='anthropic.claude-v2:1',
)

# SerpAPI web search plus a calculator tool backed by the completion model.
tools = load_tools(["serpapi", "llm-math"], llm=llm)

# Pull the ReAct JSON prompt from the LangChain Hub and fill in the
# tool descriptions and tool names it expects.
prompt = hub.pull("hwchase17/react-json")
prompt = prompt.partial(
    tools=render_text_description(tools),
    tool_names=", ".join([t.name for t in tools]),
)

# Stop generation at Claude's next human turn so the model does not
# hallucinate an observation on its own.
chat_model_with_stop = chat_model.bind(stop=["\n\nHuman:"])

# LCEL pipeline: map the question and the scratchpad into the prompt,
# call the chat model, then parse the JSON-formatted ReAct action.
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    | prompt
    | chat_model_with_stop
    | ReActJsonSingleInputOutputParser()
)

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
)

agent_executor.invoke(
    {"input": "Which city has a larger population, Guiyang or Tacheng?"}
    # {"input": "what's the smallest prime number greater than 1000?"}
    # {"input": "The Japanese military officer Kenji Hatanaka conspired to prevent the broadcast of the surrender of this emperor who passed away a few decades later and was succeeded by whom?"}
    # {"input": "who is the F1 World Champion in 2023? what's his age raised to the 0.43 power?"}
)