LangChain is a powerful framework for building applications with large language models. It provides abstractions for chains, agents, tools, and memory, making it easier to create sophisticated AI systems.
The core abstractions include:

- **Chains** — compose LLMs with other components in sequences
- **Agents** — LLMs that can use tools and make decisions
- **Memory** — persist state between chain/agent calls
- **Tools** — functions agents can use to interact with the world
- **Retrievers** — interface with vector stores and databases
- **Callbacks** — hook into various stages of execution
# Install LangChain and dependencies
pip install langchain langchain-openai langchain-community
pip install langchain-experimental langchainhub
pip install chromadb tiktoken

# For LangGraph (advanced agents)
pip install langgraph

# Environment setup (the lines below are Python, run before using the examples)
import os
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["LANGCHAIN_TRACING_V2"] = "true"  # Enable tracing
os.environ["LANGCHAIN_API_KEY"] = "your-langsmith-key"
from langchain.agents import create_openai_functions_agent, AgentExecutor
from langchain_openai import ChatOpenAI
from langchain.tools import Tool, tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.tools import DuckDuckGoSearchRun
from langchain.tools.retriever import create_retriever_tool
import ast
import operator
import requests

# Initialize LLM (temperature=0 for more deterministic tool use)
llm = ChatOpenAI(model="gpt-4", temperature=0)

# Operators permitted by the safe arithmetic evaluator below.
_ALLOWED_OPERATORS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
    ast.UAdd: operator.pos,
}


def _safe_arith(node):
    """Evaluate an AST node restricted to numeric literals and arithmetic.

    Raises ValueError for anything else (names, calls, attributes, ...),
    so model-supplied text can never execute code.
    """
    if isinstance(node, ast.Expression):
        return _safe_arith(node.body)
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_OPERATORS:
        return _ALLOWED_OPERATORS[type(node.op)](
            _safe_arith(node.left), _safe_arith(node.right)
        )
    if isinstance(node, ast.UnaryOp) and type(node.op) in _ALLOWED_OPERATORS:
        return _ALLOWED_OPERATORS[type(node.op)](_safe_arith(node.operand))
    raise ValueError("unsupported expression")


# Define custom tools
@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression."""
    # SECURITY FIX: the original called eval() on model-supplied text behind a
    # bare except, which allows arbitrary code execution. Parse the expression
    # and evaluate only numeric arithmetic; everything else is rejected.
    try:
        result = _safe_arith(ast.parse(expression, mode="eval"))
        return f"The result is: {result}"
    except (SyntaxError, ValueError, ZeroDivisionError, OverflowError):
        # Same user-facing message as before.
        return "Invalid expression"


@tool
def get_weather(city: str) -> str:
    """Get current weather for a city."""
    # In production, use a real weather API
    return f"The weather in {city} is sunny, 22°C"


# Web search tool
search = DuckDuckGoSearchRun()
search_tool = Tool(
    name="web_search",
    func=search.run,
    description="Search the web for current information"
)

# Create agent
tools = [calculate, get_weather, search_tool]

# Agent prompt
prompt = ChatPromptTemplate.from_messages([
    ("system", """You are a helpful AI assistant with access to various tools.
    Use them to answer questions accurately. Think step-by-step."""),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

# Create the agent
agent = create_openai_functions_agent(llm, tools, prompt)

# Create executor
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    return_intermediate_steps=True,
    max_iterations=5,
    early_stopping_method="generate"
)

# Use the agent
result = agent_executor.invoke({
    "input": "What's the weather in Tokyo and calculate 25% of 480"
})
print(result["output"])
from langchain.agents import create_react_agent
from langchain import hub

# Fetch the canonical ReAct prompt template from the LangChain hub.
react_prompt = hub.pull("hwchase17/react")

# Build a ReAct-style agent: it reasons in a Thought -> Action ->
# Observation loop until it can emit a final answer.
react_agent = create_react_agent(
    llm=llm,
    tools=tools,
    prompt=react_prompt,
)

# handle_parsing_errors=True feeds malformed LLM output back to the model
# instead of raising, which makes the text-based ReAct format more robust.
react_executor = AgentExecutor(
    agent=react_agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
)

result = react_executor.invoke(
    {"input": "Find the latest AI news and summarize the top 3 stories"}
)
from langchain.agents import create_self_ask_with_search_agent # This agent decomposes questions into sub-questions self_ask_agent = create_self_ask_with_search_agent( llm=llm, search_tool=search_tool, prompt=hub.pull("hwchase17/self-ask-with-search") ) # Example: Complex multi-hop question result = AgentExecutor( agent=self_ask_agent, tools=[search_tool], verbose=True ).invoke({ "input": "What is the population of the capital of the country where Tesla's largest factory outside the US is located?" })
from langchain_experimental.plan_and_execute import ( PlanAndExecute, load_agent_executor, load_chat_planner ) # Create planner and executor planner = load_chat_planner(llm) executor = load_agent_executor(llm, tools, verbose=True) # Create plan-and-execute agent plan_agent = PlanAndExecute( planner=planner, executor=executor, verbose=True ) # Complex task requiring planning result = plan_agent.run( "Create a comprehensive market analysis report for electric vehicles: " "1) Current market size, 2) Top 5 manufacturers, 3) Growth projections, " "4) Key challenges, 5) Investment opportunities" )
from langchain.memory import ConversationBufferWindowMemory
from langchain.callbacks import StdOutCallbackHandler
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter


class AdvancedAgent:
    """OpenAI-functions agent wired with RAG retrieval, Python execution,
    file I/O tools and a windowed conversation memory."""

    def __init__(self):
        self.llm = ChatOpenAI(model="gpt-4", temperature=0)
        # Keep only the last 10 exchanges; surfaced to the prompt through
        # the "chat_history" placeholder below.
        self.memory = ConversationBufferWindowMemory(
            k=10,
            return_messages=True,
            memory_key="chat_history"
        )
        self.tools = self._setup_tools()
        self.agent = self._create_agent()

    def _setup_tools(self):
        """Build the tool list: retriever, code execution, file read/write."""
        tools = []

        # 1. Vector store tool for RAG
        embeddings = OpenAIEmbeddings()
        vectorstore = Chroma(
            persist_directory="./chroma_db",
            embedding_function=embeddings
        )
        retriever_tool = create_retriever_tool(
            vectorstore.as_retriever(),
            "knowledge_base",
            "Search internal knowledge base for information"
        )
        tools.append(retriever_tool)

        # 2. Code execution tool
        @tool
        def execute_python(code: str) -> str:
            """Execute Python code and return the result."""
            # SECURITY NOTE(review): exec() on model-generated code is
            # dangerous even with empty __builtins__ — well-known sandbox
            # escapes exist. Run untrusted code in an isolated subprocess
            # or container instead.
            try:
                # Use exec with restricted globals for safety
                exec_globals = {"__builtins__": {}}
                exec(code, exec_globals)
                # Returns the repr of the whole globals dict, not a single
                # result value.
                return str(exec_globals)
            except Exception as e:
                return f"Error: {e}"
        tools.append(execute_python)

        # 3. File operations
        @tool
        def read_file(filepath: str) -> str:
            """Read contents of a file."""
            try:
                with open(filepath, 'r') as f:
                    return f.read()
            except Exception as e:
                return f"Error reading file: {e}"

        @tool
        def write_file(filepath: str, content: str) -> str:
            """Write content to a file."""
            try:
                with open(filepath, 'w') as f:
                    f.write(content)
                return f"Successfully wrote to {filepath}"
            except Exception as e:
                return f"Error writing file: {e}"

        tools.extend([read_file, write_file])
        return tools

    def _create_agent(self):
        """Assemble prompt + agent + executor with memory and callbacks."""
        # NOTE(review): the system prompt advertises "Web search", but
        # _setup_tools does not include a search tool — confirm intent.
        prompt = ChatPromptTemplate.from_messages([
            ("system", """You are an advanced AI assistant with access to:
            - Knowledge base search
            - Python code execution
            - File operations
            - Web search

            Always think step-by-step and use tools when needed.
            Maintain context from previous conversations."""),
            MessagesPlaceholder(variable_name="chat_history"),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])

        agent = create_openai_functions_agent(
            self.llm, self.tools, prompt
        )

        return AgentExecutor(
            agent=agent,
            tools=self.tools,
            memory=self.memory,
            verbose=True,
            return_intermediate_steps=True,
            max_iterations=10,
            callbacks=[StdOutCallbackHandler()]
        )

    def run(self, query: str):
        """Execute agent with query."""
        return self.agent.invoke({"input": query})

    def add_to_knowledge_base(self, documents):
        """Add documents to vector store."""
        # NOTE(review): this builds a fresh Chroma collection from the new
        # documents on every call rather than appending to the store opened
        # in _setup_tools — confirm that is the intended behaviour.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        texts = text_splitter.split_documents(documents)
        embeddings = OpenAIEmbeddings()
        vectorstore = Chroma.from_documents(
            texts,
            embeddings,
            persist_directory="./chroma_db"
        )
        vectorstore.persist()


# Usage
agent_system = AdvancedAgent()
result = agent_system.run(
    "Search the knowledge base for information about LangChain, "
    "then write a Python script that demonstrates its key features"
)
LangGraph enables building stateful, multi-agent applications with cycles, controllability, and persistence.
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated, Sequence
import operator
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage


# Graph state: `messages` accumulates across nodes (operator.add concatenates
# each node's returned list onto the transcript); `next_step` names the
# intended follow-up node.
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]
    next_step: str


# Create graph
workflow = StateGraph(AgentState)


def researcher(state):
    """Research node that gathers information."""
    topic = state["messages"][-1].content
    # Simulated research; a real implementation would call a retriever here.
    research_result = f"Research on: {topic}\nFound: [relevant information]"
    return {
        "messages": [AIMessage(content=research_result)],
        "next_step": "analyzer",
    }


def analyzer(state):
    """Analyze research results."""
    prior = state["messages"][-1].content  # research output (read but not interpolated below)
    analysis = "Analysis: The research shows... [detailed analysis]"
    return {
        "messages": [AIMessage(content=analysis)],
        "next_step": "writer",
    }


def writer(state):
    """Generate final output."""
    summary = state["messages"][-1].content
    final_output = f"Final Report:\n{summary}\n[Formatted output]"
    return {
        "messages": [AIMessage(content=final_output)],
        "next_step": "end",
    }


# Register the three nodes and wire them into a linear pipeline:
# researcher -> analyzer -> writer -> END.
for node_name, node_fn in (
    ("researcher", researcher),
    ("analyzer", analyzer),
    ("writer", writer),
):
    workflow.add_node(node_name, node_fn)

workflow.set_entry_point("researcher")
workflow.add_edge("researcher", "analyzer")
workflow.add_edge("analyzer", "writer")
workflow.add_edge("writer", END)

# Compile graph
app = workflow.compile()

# Kick off a run from a single human message and print the final report.
initial_state = {
    "messages": [HumanMessage(content="Analyze the AI agent market")],
    "next_step": "researcher",
}
result = app.invoke(initial_state)
print(result["messages"][-1].content)
from langgraph.graph import StateGraph, END
from langgraph.checkpoint import MemorySaver
from langchain_openai import ChatOpenAI


# NOTE(review): TypedDict/Annotated/Sequence/operator and the message classes
# are in scope from the earlier LangGraph example's imports.
class MultiAgentState(TypedDict):
    # Running transcript; operator.add concatenates each node's new messages.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Node the coordinator has chosen to run next.
    current_agent: str
    # Set True by the reviewer; routes the graph to END.
    task_completed: bool
    # Per-role outputs keyed 'research', 'code', 'review'.
    results: dict


class MultiAgentSystem:
    """Coordinator-routed researcher/coder/reviewer team built on LangGraph.

    Every worker node returns control to the coordinator, which picks the
    next worker until the reviewer marks the task complete."""

    def __init__(self):
        self.llm = ChatOpenAI(model="gpt-4")
        self.workflow = StateGraph(MultiAgentState)
        self._setup_agents()
        self._setup_edges()

    def _setup_agents(self):
        """Define the four node functions and register them on the graph."""

        # Coordinator Agent
        def coordinator(state):
            """Decides which agent should act next."""
            prompt = f"""
            Current state: {state['results']}
            Messages: {state['messages'][-1].content if state['messages'] else 'Start'}
            Decide next agent: researcher, coder, reviewer, or end
            """
            response = self.llm.invoke(prompt)
            # NOTE(review): self._parse_next_agent is called but not defined
            # in this snippet — it must map the free-text LLM reply to one of
            # the route keys used in _setup_edges.
            next_agent = self._parse_next_agent(response.content)
            return {
                "current_agent": next_agent,
                "messages": [AIMessage(content=f"Routing to {next_agent}")]
            }

        # Researcher Agent
        def researcher(state):
            """Researches information."""
            # The task is two messages back — before the coordinator's
            # "Routing to ..." message — when the transcript is long enough.
            task = state['messages'][-2].content if len(state['messages']) > 1 else ""
            research = self.llm.invoke(
                f"Research this topic and provide findings: {task}"
            )
            # NOTE(review): mutates state['results'] in place rather than
            # returning it — confirm LangGraph carries this through steps.
            state['results']['research'] = research.content
            return {
                "messages": [AIMessage(content=research.content)],
                "current_agent": "coordinator"
            }

        # Coder Agent
        def coder(state):
            """Writes code based on requirements."""
            context = state['results'].get('research', '')
            code = self.llm.invoke(
                f"Write code based on: {context}"
            )
            state['results']['code'] = code.content
            return {
                "messages": [AIMessage(content=code.content)],
                "current_agent": "coordinator"
            }

        # Reviewer Agent
        def reviewer(state):
            """Reviews and provides feedback."""
            code = state['results'].get('code', '')
            review = self.llm.invoke(
                f"Review this code and suggest improvements: {code}"
            )
            state['results']['review'] = review.content
            # task_completed=True ends the run on the next routing decision.
            return {
                "messages": [AIMessage(content=review.content)],
                "current_agent": "coordinator",
                "task_completed": True
            }

        # Add all nodes
        self.workflow.add_node("coordinator", coordinator)
        self.workflow.add_node("researcher", researcher)
        self.workflow.add_node("coder", coder)
        self.workflow.add_node("reviewer", reviewer)

    def _setup_edges(self):
        """Wire the coordinator's conditional routing and the return edges."""

        # Conditional routing
        def route_next(state):
            # Reviewer sets task_completed, which terminates the run.
            if state.get('task_completed', False):
                return END
            return state['current_agent']

        self.workflow.set_entry_point("coordinator")

        # Add conditional edges from coordinator
        self.workflow.add_conditional_edges(
            "coordinator",
            route_next,
            {
                "researcher": "researcher",
                "coder": "coder",
                "reviewer": "reviewer",
                END: END
            }
        )

        # All agents return to coordinator
        self.workflow.add_edge("researcher", "coordinator")
        self.workflow.add_edge("coder", "coordinator")
        self.workflow.add_edge("reviewer", "coordinator")

    def compile(self):
        """Compile the graph with an in-memory checkpointer for persistence."""
        # NOTE(review): newer langgraph releases export MemorySaver from
        # langgraph.checkpoint.memory — confirm the import path above.
        memory = MemorySaver()
        return self.workflow.compile(checkpointer=memory)


# Usage
system = MultiAgentSystem()
app = system.compile()

# Run with streaming; thread_id keys the checkpointer's saved state so the
# conversation can be resumed later.
config = {"configurable": {"thread_id": "main-thread"}}
for output in app.stream({
    "messages": [HumanMessage(content="Create a web scraping tool")],
    "current_agent": "coordinator",
    "results": {}
}, config):
    print(output)
from langchain.agents import AgentExecutor
from tenacity import retry, stop_after_attempt, wait_exponential


class RobustAgent:
    """Agent wrapper that retries with exponential backoff and falls back
    from GPT-4 to GPT-3.5-turbo when the primary model raises."""

    def __init__(self):
        self.primary_llm = ChatOpenAI(model="gpt-4")
        self.fallback_llm = ChatOpenAI(model="gpt-3.5-turbo")

    # Retries the whole primary-then-fallback attempt up to 3 times with
    # exponential backoff; tenacity only re-runs if the fallback ALSO raises,
    # because the except block below already absorbs primary failures.
    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10)
    )
    def execute_with_retry(self, query):
        try:
            # Try with primary LLM
            return self.run_agent(self.primary_llm, query)
        except Exception as e:
            print(f"Primary failed: {e}, using fallback")
            return self.run_agent(self.fallback_llm, query)

    def run_agent(self, llm, query):
        # NOTE(review): `tools`, `prompt` and create_openai_functions_agent
        # are module-level names defined earlier in the file; consider
        # passing them in explicitly instead of relying on globals.
        agent_executor = AgentExecutor(
            agent=create_openai_functions_agent(llm, tools, prompt),
            tools=tools,
            handle_parsing_errors=True,
            max_iterations=3,
            early_stopping_method="generate"
        )
        return agent_executor.invoke({"input": query})
# LangSmith Integration
from datetime import datetime  # FIX: datetime.now() below was used without being imported

import langsmith

client = langsmith.Client()

# Custom callbacks for monitoring
from langchain.callbacks.base import BaseCallbackHandler


class MonitoringCallback(BaseCallbackHandler):
    """Callback handler that tracks token usage and tool invocations."""

    def __init__(self):
        self.token_count = 0   # cumulative total_tokens across LLM calls
        self.tool_calls = []   # one record per tool start

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM Start: {prompts}")

    def on_llm_end(self, response, **kwargs):
        # Track token usage; llm_output can be None for some providers,
        # so guard before calling .get() on it.
        if getattr(response, 'llm_output', None):
            tokens = response.llm_output.get('token_usage', {})
            self.token_count += tokens.get('total_tokens', 0)

    def on_tool_start(self, serialized, input_str, **kwargs):
        # Record which tool ran, with what input, and when.
        self.tool_calls.append({
            'tool': serialized.get('name'),
            'input': input_str,
            'timestamp': datetime.now()
        })

    def get_metrics(self):
        """Summarize observed usage: token total, call count, distinct tools."""
        return {
            'total_tokens': self.token_count,
            'tool_calls': len(self.tool_calls),
            'tools_used': list(set(t['tool'] for t in self.tool_calls))
        }


# Use in agent
monitor = MonitoringCallback()
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    callbacks=[monitor],
    verbose=True
)
result = agent_executor.invoke({"input": "Complex task"})
print(monitor.get_metrics())
Feature | LangChain | CrewAI | AutoGen |
---|---|---|---|
Learning Curve | Moderate-High | Low-Moderate | High |
Flexibility | Very High | Moderate | High |
Multi-Agent | Via LangGraph | Native | Native |
Tool Ecosystem | Extensive | Growing | Moderate |
Production Ready | Yes | Yes | Yes |
Best For | Complex chains, RAG | Multi-agent teams | Research, Complex reasoning |