""" Advanced LangChain Demo This script demonstrates advanced LangChain features including: 1. Agents with multiple tools 2. Memory and conversation management 3. Custom tools and chains 4. Structured output parsing 5. Sequential chains """ import os from typing import List, Dict, Union import re from datetime import datetime from rich.console import Console from rich.panel import Panel from rich.table import Table from rich import print as rprint from langchain.agents import load_tools, initialize_agent, Tool, AgentExecutor, AgentType from langchain.memory import ConversationBufferMemory from langchain.chat_models import ChatOpenAI from langchain.chains import LLMChain, SimpleSequentialChain from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate from langchain.schema import HumanMessage, AgentAction, AgentFinish from langchain.tools import DuckDuckGoSearchRun from langchain.callbacks import get_openai_callback # Initialize Rich console for pretty output console = Console() def create_news_analyzer_agent(): """ Creates an agent that can search for news and analyze them. Demonstrates tool integration and agent creation. """ # Initialize the language model llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # Initialize tools search = DuckDuckGoSearchRun() tools = [ Tool( name="Search", func=search.run, description="Useful for searching recent news and information" ) ] # Initialize memory memory = ConversationBufferMemory( memory_key="chat_history", return_messages=True ) # Create the agent agent = initialize_agent( tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=memory, verbose=True ) return agent def create_analysis_chain(): """ Creates a sequential chain for analyzing topics. Demonstrates chain composition and prompt management. """ llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # First chain: Generate main points main_points_prompt = PromptTemplate( input_variables=["topic"], template="What are the 3 main points to understand about {topic}?" ) main_points_chain = LLMChain(llm=llm, prompt=main_points_prompt) # Second chain: Generate implications implications_prompt = PromptTemplate( input_variables=["points"], template="Given these points: {points} What are the potential future implications?" ) implications_chain = LLMChain(llm=llm, prompt=implications_prompt) # Combine chains overall_chain = SimpleSequentialChain( chains=[main_points_chain, implications_chain], verbose=True ) return overall_chain def display_results(title: str, content: str): """Pretty print results using Rich.""" console.print() console.print(Panel.fit( f"🔍 {title}", style="bold blue" )) console.print() console.print(content) console.print() def main(): """ Main function demonstrating various LangChain features. """ console.print(Panel.fit( "🚀 Advanced LangChain Features Demo", subtitle="Demonstrating Agents, Chains, and Tools", style="bold green" )) # Initialize components news_agent = create_news_analyzer_agent() analysis_chain = create_analysis_chain() # Track token usage with get_openai_callback() as cb: # 1. Use agent to search and analyze news console.print(" [bold]🔍 Using Agent to Search and Analyze News[/bold]") news_query = "What are the latest developments in AI regulation?" agent_result = news_agent.run(news_query) display_results("Agent Analysis Result", agent_result) # 2. 
def display_results(title: str, content: str):
    """Pretty-print a titled result using Rich."""
    console.print()
    console.print(Panel.fit(
        f"🔍 {title}",
        style="bold blue"
    ))
    console.print()
    console.print(content)
    console.print()


def main():
    """
    Main function demonstrating various LangChain features.
    """
    console.print(Panel.fit(
        "🚀 Advanced LangChain Features Demo",
        subtitle="Demonstrating Agents, Chains, and Tools",
        style="bold green"
    ))

    # Initialize components
    news_agent = create_news_analyzer_agent()
    analysis_chain = create_analysis_chain()

    # Track token usage across every call made inside this block
    with get_openai_callback() as cb:
        # 1. Use the agent to search for and analyze news
        console.print("\n[bold]🔍 Using Agent to Search and Analyze News[/bold]")
        news_query = "What are the latest developments in AI regulation?"
        agent_result = news_agent.run(news_query)
        display_results("Agent Analysis Result", agent_result)

        # 2. Use the sequential chain for deeper analysis
        console.print("\n[bold]📊 Using Sequential Chain for Analysis[/bold]")
        chain_result = analysis_chain.run("AI regulation and its impact on technology companies")
        display_results("Sequential Chain Analysis", chain_result)

    # Display token usage
    console.print(Panel.fit(
        f"""[bold]📊 Token Usage Statistics[/bold]

Total Tokens: {cb.total_tokens}
Prompt Tokens: {cb.prompt_tokens}
Completion Tokens: {cb.completion_tokens}
Total Cost (USD): ${cb.total_cost:.4f}""",
        style="bold yellow"
    ))


if __name__ == "__main__":
    main()