openhands reviewed this gist.
2 files changed, 162 insertions
advanced_langchain_demo.py (file created)
"""
Advanced LangChain Demo

This script demonstrates advanced LangChain features including:
1. Agents with tools
2. Memory and conversation management
3. Custom tool wrapping (DuckDuckGo search)
4. Sequential chains
5. Token usage tracking with callbacks
"""

from rich.console import Console
from rich.panel import Panel

from langchain.agents import initialize_agent, Tool, AgentType
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain.tools import DuckDuckGoSearchRun
from langchain.callbacks import get_openai_callback

# Initialize Rich console for pretty output
console = Console()

def create_news_analyzer_agent():
    """
    Creates an agent that can search for news and analyze them.
    Demonstrates tool integration and agent creation.
    """
    # Initialize the language model
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")

    # Initialize tools
    search = DuckDuckGoSearchRun()

    tools = [
        Tool(
            name="Search",
            func=search.run,
            description="Useful for searching recent news and information"
        )
    ]

    # Initialize memory
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )

    # Create the agent
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
        memory=memory,
        verbose=True
    )

    return agent

def create_analysis_chain():
    """
    Creates a sequential chain for analyzing topics.
    Demonstrates chain composition and prompt management.
    """
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")

    # First chain: Generate main points
    main_points_prompt = PromptTemplate(
        input_variables=["topic"],
        template="What are the 3 main points to understand about {topic}?"
    )
    main_points_chain = LLMChain(llm=llm, prompt=main_points_prompt)

    # Second chain: Generate implications
    implications_prompt = PromptTemplate(
        input_variables=["points"],
        template=(
            "Given these points: {points}\n"
            "What are the potential future implications?"
        )
    )
    implications_chain = LLMChain(llm=llm, prompt=implications_prompt)

    # Combine chains
    overall_chain = SimpleSequentialChain(
        chains=[main_points_chain, implications_chain],
        verbose=True
    )

    return overall_chain

def display_results(title: str, content: str):
    """Pretty print results using Rich."""
    console.print()
    console.print(Panel.fit(
        f"🔍 {title}",
        style="bold blue"
    ))
    console.print()
    console.print(content)
    console.print()

def main():
    """
    Main function demonstrating various LangChain features.
    """
    console.print(Panel.fit(
        "🚀 Advanced LangChain Features Demo",
        subtitle="Demonstrating Agents, Chains, and Tools",
        style="bold green"
    ))

    # Initialize components
    news_agent = create_news_analyzer_agent()
    analysis_chain = create_analysis_chain()

    # Track token usage
    with get_openai_callback() as cb:
        # 1. Use agent to search and analyze news
        console.print("\n[bold]🔍 Using Agent to Search and Analyze News[/bold]")
        news_query = "What are the latest developments in AI regulation?"
        agent_result = news_agent.run(news_query)
        display_results("Agent Analysis Result", agent_result)

        # 2. Use sequential chain for deeper analysis
        console.print("\n[bold]📊 Using Sequential Chain for Analysis[/bold]")
        chain_result = analysis_chain.run("AI regulation and its impact on technology companies")
        display_results("Sequential Chain Analysis", chain_result)

        # Display token usage
        console.print(Panel.fit(
            f"""[bold]📊 Token Usage Statistics[/bold]

Total Tokens: {cb.total_tokens}
Prompt Tokens: {cb.prompt_tokens}
Completion Tokens: {cb.completion_tokens}
Total Cost (USD): ${cb.total_cost:.4f}""",
            style="bold yellow"
        ))


if __name__ == "__main__":
    main()

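The script assumes OPENAI_API_KEY is already set in the environment. Since requirements.txt pins python-dotenv (which the demo never imports), a minimal way to wire it in could look like the sketch below; the env_setup.py file name, the ensure_openai_key helper, and the use of a local .env file are assumptions for illustration, not part of the gist.

# env_setup.py (hypothetical helper, not part of the gist)
# Loads OPENAI_API_KEY from a local .env file before running the demo.
import os

from dotenv import load_dotenv


def ensure_openai_key() -> None:
    """Load variables from .env and fail fast if the OpenAI key is missing."""
    load_dotenv()  # reads .env in the current working directory, if present
    if not os.getenv("OPENAI_API_KEY"):
        raise RuntimeError("OPENAI_API_KEY is not set; add it to .env or the environment.")


if __name__ == "__main__":
    ensure_openai_key()
    from advanced_langchain_demo import main
    main()
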
requirements.txt (file created)
langchain==0.0.340
openai==1.3.7
python-dotenv==1.0.0
requests==2.31.0
rich==13.7.0
beautifulsoup4==4.12.2
wikipedia==1.4.0
google-search-results==2.4.2
duckduckgo-search==3.9.9
tiktoken==0.5.1
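With these pins (langchain 0.0.340, duckduckgo-search 3.9.9), the search tool the agent wraps can also be smoke-tested on its own before running the full demo. A minimal sketch, assuming the packages install cleanly; the truncation to 300 characters is just for readability:

# Standalone smoke test for the search tool used by the agent.
from langchain.tools import DuckDuckGoSearchRun

search = DuckDuckGoSearchRun()
result = search.run("latest developments in AI regulation")
print(result[:300])  # print only the first few hundred characters of the raw text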