"""
LangChain Hacker News Story Categorizer Tutorial

This script demonstrates key LangChain concepts by building a simple
application that fetches Hacker News stories and categorizes them using
OpenAI's GPT model.

Key LangChain Concepts Demonstrated:
1. Chains: Sequences of operations that can be combined
2. Prompts: Structured way to interact with LLMs
3. LLMs: Language Model integration
4. Pydantic Output Parsing: Type-safe structured output handling
"""

import os
from typing import Dict, List

import requests
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich import print as rprint
from pydantic import BaseModel, Field
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import PydanticOutputParser

# Initialize Rich console for pretty output
console = Console()

# Your OpenAI API key - Replace with your actual key.
# setdefault keeps a key that is already set in the environment instead of
# clobbering it with the placeholder.
os.environ.setdefault("OPENAI_API_KEY", "YOUR-OPENAI-API-KEY")

# Base URL of the public Hacker News Firebase API
HN_API_BASE = "https://hacker-news.firebaseio.com/v0"

# Seconds before an HTTP request to the HN API is abandoned; without a
# timeout, requests.get can block forever on a stalled connection.
HTTP_TIMEOUT = 10


class StoryAnalysis(BaseModel):
    """
    Pydantic model for story analysis output.

    Using Pydantic provides type safety and validation: the LLM's raw text
    response is parsed into these typed fields by PydanticOutputParser.
    """

    category: str = Field(
        description="The main category of the story (Tech, Business, Science, etc.)"
    )
    subcategory: str = Field(description="A more specific subcategory")
    summary: str = Field(
        description="A brief 1-2 sentence summary of the story's main points"
    )


def fetch_hn_stories(limit: int = 5) -> List[Dict]:
    """
    Fetch top stories from Hacker News.

    This function demonstrates basic API interaction outside of LangChain.
    Later, we'll combine this with LangChain components.

    Args:
        limit: Maximum number of top stories to fetch.

    Returns:
        A list of dicts with keys 'title', 'url', and 'score'. Items
        without a 'title' (e.g. deleted stories) are skipped; 'url' and
        'score' fall back to '' and 0 when absent (e.g. Ask HN posts).

    Raises:
        requests.HTTPError: If the HN API returns an error status.
        requests.Timeout: If a request exceeds HTTP_TIMEOUT seconds.
    """
    # Get top story IDs
    response = requests.get(f"{HN_API_BASE}/topstories.json", timeout=HTTP_TIMEOUT)
    response.raise_for_status()  # fail loudly instead of a confusing JSON error
    story_ids = response.json()[:limit]

    stories = []
    for story_id in story_ids:
        # Fetch individual story details
        story_response = requests.get(
            f"{HN_API_BASE}/item/{story_id}.json", timeout=HTTP_TIMEOUT
        )
        story_response.raise_for_status()
        story_data = story_response.json()

        # story_data can be None for deleted items; skip anything unusable
        if story_data and 'title' in story_data:
            stories.append({
                'title': story_data['title'],
                'url': story_data.get('url', ''),
                'score': story_data.get('score', 0)
            })
    return stories


def setup_langchain_categorizer():
    """
    Set up the LangChain components for story categorization.

    This demonstrates several key LangChain concepts:
    1. Pydantic Output Parser: Type-safe structured output
    2. Prompt Templates: Create reusable prompts
    3. LLM Chain: Combine prompts and models

    Returns:
        A (chain, output_parser) tuple: the LLMChain to run per story and
        the PydanticOutputParser used to turn the raw LLM text into a
        StoryAnalysis instance.
    """
    # Create a Pydantic output parser
    # This is a more modern approach than using ResponseSchema
    output_parser = PydanticOutputParser(pydantic_object=StoryAnalysis)

    # Create a prompt template with format instructions
    # This shows how to create structured prompts in LangChain
    prompt = ChatPromptTemplate.from_template("""
    Analyze the following Hacker News story and provide a categorization and summary.

    Story Title: {title}
    URL: {url}

    {format_instructions}

    Provide your analysis in the exact format specified above:
    """)

    # Initialize the language model
    # ChatOpenAI is a LangChain wrapper around OpenAI's chat models
    # temperature=0 makes the categorization as deterministic as possible
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")

    # Create a chain that combines the prompt and model
    # LLMChain is a basic building block in LangChain for combining prompts with LLMs
    chain = LLMChain(llm=llm, prompt=prompt)

    return chain, output_parser


def display_results(stories: List[Dict], categorized_results: List[StoryAnalysis]):
    """
    Display the results in a pretty format using Rich.

    This function shows how to work with the structured output from our
    LangChain pipeline. The categorized_results are strongly typed thanks
    to our Pydantic model.

    Args:
        stories: The raw story dicts returned by fetch_hn_stories.
        categorized_results: One StoryAnalysis per story, in the same order.
    """
    table = Table(
        title="Hacker News Stories Analysis",
        show_header=True,
        header_style="bold magenta",
    )
    table.add_column("Title", style="cyan", no_wrap=False)
    table.add_column("Category", style="green", no_wrap=True)
    table.add_column("Subcategory", style="yellow", no_wrap=True)
    table.add_column("Summary", style="white", no_wrap=False)

    for story, result in zip(stories, categorized_results):
        table.add_row(
            story['title'],
            result.category,  # Note: Using dot notation because result is a Pydantic model
            result.subcategory,
            result.summary
        )

    console.print()
    console.print(Panel.fit(
        "🚀 LangChain Hacker News Analyzer",
        subtitle="Analyzing top stories using LangChain and GPT-3.5",
        style="bold blue"
    ))
    console.print()
    console.print(table)
    console.print()


def main():
    """
    Main function to run the HN story categorizer.

    This function orchestrates the entire pipeline:
    1. Fetch stories from HN API
    2. Set up LangChain components
    3. Process stories through the LLM chain
    4. Display results
    """
    # Show a welcome message
    console.print(Panel.fit(
        "Fetching and analyzing Hacker News stories...",
        style="bold green"
    ))

    # Fetch stories
    stories = fetch_hn_stories(limit=5)

    # Setup LangChain components
    chain, output_parser = setup_langchain_categorizer()

    # Format instructions do not depend on the story, so compute them once
    # instead of once per loop iteration.
    format_instructions = output_parser.get_format_instructions()

    # Process each story
    categorized_results = []
    with console.status("[bold green]Processing stories..."):
        for story in stories:
            # Run the chain
            result = chain.run(
                title=story['title'],
                url=story['url'],
                format_instructions=format_instructions
            )

            # Parse the result into our Pydantic model
            parsed_result = output_parser.parse(result)
            categorized_results.append(parsed_result)

    # Display results
    display_results(stories, categorized_results)


if __name__ == "__main__":
    main()