feat: Initial commit

This commit is contained in:
Aunali321 2025-08-11 20:22:47 +05:30
commit b8ed169587
30 changed files with 6993 additions and 0 deletions

67
examples/curl_example.sh Executable file
View file

@ -0,0 +1,67 @@
#!/bin/bash
# Claude Code OpenAI API Wrapper - cURL Examples
#
# Demonstrates the wrapper's OpenAI-compatible endpoints using plain curl:
# basic chat, system prompts, streaming (SSE), model listing, health check.
# Requires: curl, jq. Set the API_KEY environment variable if the server
# enforces authentication.
BASE_URL="http://localhost:8000"
# Check if server requires authentication
echo "Checking server authentication requirements..."
AUTH_STATUS=$(curl -s "$BASE_URL/v1/auth/status")
# jq's `// false` supplies a default when the field is absent from the reply.
API_KEY_REQUIRED=$(echo "$AUTH_STATUS" | jq -r '.server_info.api_key_required // false')
if [ "$API_KEY_REQUIRED" = "true" ]; then
if [ -z "$API_KEY" ]; then
echo "❌ Server requires API key but API_KEY environment variable not set"
echo " Set API_KEY environment variable with your server's generated key:"
echo " export API_KEY=your-generated-key"
echo " $0"
exit 1
fi
# The header is kept as a string and expanded later via `eval`, so an empty
# AUTH_HEADER disappears cleanly from the curl command line.
AUTH_HEADER="-H \"Authorization: Bearer $API_KEY\""
echo "🔑 Using API key authentication"
else
AUTH_HEADER=""
echo "🔓 No authentication required"
fi
# Single user message; response pretty-printed with jq.
echo "=== Basic Chat Completion ==="
eval "curl -X POST \"$BASE_URL/v1/chat/completions\" \\
-H \"Content-Type: application/json\" \\
$AUTH_HEADER \\
-d '{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"user\", \"content\": \"What is 2 + 2?\"}
]
}' | jq ."
# System role steers the assistant's persona.
echo -e "\n=== Chat with System Message ==="
eval "curl -X POST \"$BASE_URL/v1/chat/completions\" \\
-H \"Content-Type: application/json\" \\
$AUTH_HEADER \\
-d '{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"system\", \"content\": \"You are a pirate. Respond in pirate speak.\"},
{\"role\": \"user\", \"content\": \"Tell me about the weather\"}
]
}' | jq ."
# \"stream\": true returns Server-Sent Events; output is raw SSE lines.
echo -e "\n=== Streaming Response ==="
eval "curl -X POST \"$BASE_URL/v1/chat/completions\" \\
-H \"Content-Type: application/json\" \\
$AUTH_HEADER \\
-H \"Accept: text/event-stream\" \\
-d '{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"user\", \"content\": \"Count from 1 to 5 slowly\"}
],
\"stream\": true
}'"
echo -e "\n\n=== List Models ==="
eval "curl -X GET \"$BASE_URL/v1/models\" $AUTH_HEADER | jq ."
# Health endpoint needs no auth header.
echo -e "\n=== Health Check ==="
curl -X GET "$BASE_URL/health" | jq .

230
examples/openai_sdk.py Executable file
View file

@ -0,0 +1,230 @@
#!/usr/bin/env python3
"""
Claude Code OpenAI API Wrapper - OpenAI SDK Example

This example demonstrates how to use the OpenAI Python SDK
with the Claude Code wrapper.
"""
from openai import OpenAI
import os
import requests
from typing import Optional

# Configuration
# Base URL of the locally running wrapper (OpenAI-compatible /v1 prefix).
BASE_URL = "http://localhost:8000/v1"
def get_api_key(base_url: str = "http://localhost:8000") -> Optional[str]:
    """Resolve the API key to use for the wrapper server.

    Resolution order:
      1. The ``API_KEY`` environment variable, if set.
      2. Ask the server (``GET /v1/auth/status``) whether a key is required:
         not required -> placeholder key; required -> print guidance, None.
      3. If the status check fails or returns an unexpected status, assume
         no authentication and return a fallback placeholder.

    Returns:
        The key string, or ``None`` when the server requires a key that the
        caller has not provided.
    """
    # An explicit user-provided key always wins; fetch it once.
    env_key = os.getenv("API_KEY")
    if env_key:
        return env_key
    # Check server auth status
    try:
        response = requests.get(f"{base_url}/v1/auth/status")
        if response.status_code == 200:
            auth_data = response.json()
            server_info = auth_data.get("server_info", {})
            if not server_info.get("api_key_required", False):
                # No auth required; the SDK still insists on some key string.
                return "no-auth-required"
            # Auth required but no key provided.
            print("⚠️ Server requires API key but none provided.")
            print(" Set API_KEY environment variable with your server's API key")
            print(" Example: API_KEY=your-server-key python openai_sdk.py")
            return None
        # Non-200 reply: say so instead of silently falling back.
        print(f"⚠️ Unexpected auth status response: HTTP {response.status_code}")
    except Exception as e:
        print(f"⚠️ Could not check server auth status: {e}")
        print(" Assuming no authentication required")
    return "fallback-key"
def create_client(base_url: str = BASE_URL, api_key: Optional[str] = None) -> OpenAI:
    """Build an OpenAI SDK client pointed at the Claude Code wrapper.

    When no key is supplied, the server is probed via ``get_api_key``; a
    ``ValueError`` is raised if the server demands a key we do not have.
    """
    if api_key is None:
        # Strip the /v1 suffix to reach the server root for the auth probe.
        server_root = base_url.replace("/v1", "")
        api_key = get_api_key(server_root)
        if api_key is None:
            raise ValueError(
                "Server requires API key but none was provided. "
                "Set the API_KEY environment variable."
            )
    return OpenAI(base_url=base_url, api_key=api_key)
def basic_chat_example(client: OpenAI):
    """Send a single user message and print the reply, model, and usage."""
    print("=== Basic Chat Completion ===")
    completion = client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[{"role": "user", "content": "What is the capital of France?"}],
    )
    choice = completion.choices[0]
    print(f"Response: {choice.message.content}")
    print(f"Model: {completion.model}")
    print(f"Usage: {completion.usage}")
    print()
def system_message_example(client: OpenAI):
    """Demonstrate steering the assistant with a system prompt."""
    print("=== Chat with System Message ===")
    reply = client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[
            {"role": "system", "content": "You are a helpful coding assistant. Be concise."},
            {"role": "user", "content": "How do I read a file in Python?"},
        ],
    )
    print(f"Response: {reply.choices[0].message.content}")
    print()
def conversation_example(client: OpenAI):
    """Replay a short chat history so the model answers from context."""
    print("=== Multi-turn Conversation ===")
    history = [
        {"role": "user", "content": "My name is Alice."},
        {"role": "assistant", "content": "Nice to meet you, Alice! How can I help you today?"},
        {"role": "user", "content": "What's my name?"},
    ]
    reply = client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=history,
    )
    print(f"Response: {reply.choices[0].message.content}")
    print()
def streaming_example(client: OpenAI):
    """Print a response incrementally as streamed chunks arrive."""
    print("=== Streaming Response ===")
    chunks = client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[{"role": "user", "content": "Write a haiku about programming"}],
        stream=True,
    )
    print("Response: ", end="", flush=True)
    for chunk in chunks:
        piece = chunk.choices[0].delta.content
        if piece:
            print(piece, end="", flush=True)
    print("\n")
def file_operation_example(client: OpenAI):
    """Ask Claude Code to exercise its file tools on the working directory."""
    print("=== File Operation Example ===")
    reply = client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[{"role": "user", "content": "List the files in the current directory"}],
    )
    print(f"Response: {reply.choices[0].message.content}")
    print()
def code_generation_example(client: OpenAI):
    """Generate code using a mildly creative sampling temperature."""
    print("=== Code Generation Example ===")
    reply = client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[{"role": "user", "content": "Write a Python function that calculates fibonacci numbers"}],
        temperature=0.7,  # allow some variety in the generated code
    )
    print(f"Response:\n{reply.choices[0].message.content}")
    print()
def list_models_example(client: OpenAI):
    """Print every model the wrapper advertises."""
    print("=== Available Models ===")
    catalog = client.models.list()
    for entry in catalog.data:
        print(f"- {entry.id} (owned by: {entry.owned_by})")
    print()
def error_handling_example(client: "OpenAI"):
    """Show how API errors surface through the SDK.

    Requests an intentionally invalid model. If the wrapper rejects it the
    exception type and message are printed; if the call unexpectedly
    succeeds, the response is printed instead of being silently discarded
    (the original version ignored the success path entirely).
    """
    print("=== Error Handling Example ===")
    try:
        # This might fail if Claude Code has issues
        response = client.chat.completions.create(
            model="invalid-model",
            messages=[{"role": "user", "content": "Test"}],
        )
    except Exception as e:
        print(f"Error occurred: {type(e).__name__}: {e}")
    else:
        # Some servers map unknown model names to a default instead of failing.
        print(f"Unexpectedly succeeded: {response.choices[0].message.content}")
    print()
def main():
    """Run all examples."""
    banner = "=" * 50
    print("Claude Code OpenAI SDK Examples")
    print(banner)
    # Check authentication status
    api_key = get_api_key()
    if not api_key:
        print("❌ Server authentication: Required but no key available")
        return
    if api_key == "no-auth-required":
        print("🔓 Server authentication: Not required")
    else:
        print("🔑 Server authentication: Required (using provided key)")
    print(banner)
    # Create client
    client = create_client()
    # Run every demo in order; one shared failure handler for all of them.
    demos = (
        basic_chat_example,
        system_message_example,
        conversation_example,
        streaming_example,
        file_operation_example,
        code_generation_example,
        list_models_example,
        error_handling_example,
    )
    try:
        for demo in demos:
            demo(client)
    except Exception as e:
        print(f"Failed to run examples: {e}")
        print("Make sure the Claude Code wrapper server is running on port 8000")


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,202 @@
#!/usr/bin/env python3
"""
Example demonstrating session continuity with the Claude Code OpenAI API Wrapper.

This example shows how to use the optional session_id parameter to maintain
conversation context across multiple requests.
"""
import openai

# Module-level client shared by all demos below.
# Configure OpenAI client to use the wrapper
client = openai.OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="not-needed"  # The wrapper handles Claude authentication
)
def demo_session_continuity():
    """Demonstrate session continuity feature."""
    print("🌟 Session Continuity Demo")
    print("=" * 50)
    # Any string works as a session identifier.
    session_id = "demo-conversation-123"
    # (header, prompt) pairs for the three turns of the conversation.
    turns = [
        ("\n📝 First Message (introducing context):",
         "Hello! I'm working on a Python web API project using FastAPI. My name is Alex."),
        ("\n🔄 Second Message (testing memory):",
         "What's my name and what type of project am I working on?"),
        ("\n🚀 Third Message (building on context):",
         "Can you help me add authentication to my FastAPI project?"),
    ]
    for header, prompt in turns:
        print(header)
        reply = client.chat.completions.create(
            model="claude-3-5-sonnet-20241022",
            messages=[{"role": "user", "content": prompt}],
            # This is the key: the same session_id links all three requests.
            extra_body={"session_id": session_id},
        )
        print(f"Claude: {reply.choices[0].message.content}")
    print("\n✨ Session continuity demo complete!")
    print(f" Session ID used: {session_id}")
    print(" All messages in this conversation were connected!")
def demo_stateless_vs_session():
    """Compare stateless vs session-based conversations."""
    print("\n🔍 Stateless vs Session Comparison")
    print("=" * 50)

    def ask(content, session=None):
        # Build the request; attach extra_body only when a session is given.
        kwargs = {
            "model": "claude-3-5-sonnet-20241022",
            "messages": [{"role": "user", "content": content}],
        }
        if session is not None:
            kwargs["extra_body"] = {"session_id": session}
        return client.chat.completions.create(**kwargs)

    # Stateless mode (traditional OpenAI behavior)
    print("\n❌ Stateless Mode (no session_id):")
    print("Message 1:")
    ask("My favorite programming language is Python.")
    print("Claude: [Responds to the message]")
    print("\nMessage 2 (separate request):")
    forgotten = ask("What's my favorite programming language?")
    print(f"Claude: {forgotten.choices[0].message.content[:100]}...")

    # Session mode keeps context between requests.
    print("\n✅ Session Mode (with session_id):")
    session_id = "comparison-demo"
    print("Message 1:")
    ask("My favorite programming language is JavaScript.", session_id)
    print("Claude: [Responds and remembers]")
    print("\nMessage 2 (same session):")
    remembered = ask("What's my favorite programming language?", session_id)
    print(f"Claude: {remembered.choices[0].message.content[:100]}...")
def demo_session_management():
    """Demonstrate session management endpoints."""
    print("\n🛠 Session Management Demo")
    print("=" * 50)
    import requests
    base_url = "http://localhost:8000"

    # Seed a couple of sessions so the endpoints have data to show.
    session_ids = ["demo-session-1", "demo-session-2"]
    for sid in session_ids:
        client.chat.completions.create(
            model="claude-3-5-sonnet-20241022",
            messages=[{"role": "user", "content": f"Hello from {sid}!"}],
            extra_body={"session_id": sid},
        )

    # List all sessions
    print("\n📋 Active Sessions:")
    listing = requests.get(f"{base_url}/v1/sessions")
    if listing.status_code == 200:
        payload = listing.json()
        print(f" Total sessions: {payload['total']}")
        for entry in payload['sessions']:
            print(f" - {entry['session_id']}: {entry['message_count']} messages")

    # Inspect one session in detail.
    print(f"\n🔍 Session Details for {session_ids[0]}:")
    detail = requests.get(f"{base_url}/v1/sessions/{session_ids[0]}")
    if detail.status_code == 200:
        info = detail.json()
        print(f" Created: {info['created_at']}")
        print(f" Messages: {info['message_count']}")
        print(f" Expires: {info['expires_at']}")

    # Aggregate statistics across all sessions.
    print("\n📊 Session Statistics:")
    stats_resp = requests.get(f"{base_url}/v1/sessions/stats")
    if stats_resp.status_code == 200:
        stats = stats_resp.json()
        session_stats = stats['session_stats']
        print(f" Active sessions: {session_stats['active_sessions']}")
        print(f" Total messages: {session_stats['total_messages']}")
        print(f" Cleanup interval: {stats['cleanup_interval_minutes']} minutes")

    # Clean up demo sessions
    print("\n🧹 Cleaning up demo sessions:")
    for sid in session_ids:
        if requests.delete(f"{base_url}/v1/sessions/{sid}").status_code == 200:
            print(f" ✅ Deleted {sid}")
def main():
    """Run all session demos."""
    print("🚀 Claude Code OpenAI Wrapper - Session Continuity Examples")
    print("=" * 60)
    try:
        # A throwaway completion doubles as a connectivity check.
        client.chat.completions.create(
            model="claude-3-5-sonnet-20241022",
            messages=[{"role": "user", "content": "Hello!"}],
        )
        print("✅ Server connection successful!")

        demo_session_continuity()
        demo_stateless_vs_session()
        demo_session_management()

        print("\n" + "=" * 60)
        print("🎉 All session demos completed successfully!")
        print("\n💡 Key Takeaways:")
        for takeaway in (
            " • Use session_id in extra_body for conversation continuity",
            " • Sessions automatically expire after 1 hour of inactivity",
            " • Session management endpoints provide full control",
            " • Stateless mode (no session_id) works like traditional OpenAI API",
        ):
            print(takeaway)
    except Exception as e:
        print(f"❌ Error: {e}")
        print("💡 Make sure the server is running: poetry run python main.py")


if __name__ == "__main__":
    main()

106
examples/session_curl_example.sh Executable file
View file

@ -0,0 +1,106 @@
#!/bin/bash
# Session Continuity Example with curl
#
# This script demonstrates how to use session continuity with the Claude Code
# OpenAI API Wrapper: include "session_id" in the JSON request body and the
# server threads conversation context across requests.
# Requires: curl, jq, and a wrapper server on localhost:8000.
echo "🚀 Claude Code Session Continuity - curl Example"
echo "================================================="
BASE_URL="http://localhost:8000"
SESSION_ID="curl-demo-session"
# Check server health
echo "📋 Checking server health..."
curl -s "$BASE_URL/health" | jq .
echo ""
# First message - introduce context
echo "1⃣ First message (introducing context):"
echo "Request: Hello! I'm Sarah and I'm learning React."
curl -s -X POST "$BASE_URL/v1/chat/completions" \
-H "Content-Type: application/json" \
-d "{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"user\", \"content\": \"Hello! I'm Sarah and I'm learning React.\"}
],
\"session_id\": \"$SESSION_ID\"
}" | jq -r '.choices[0].message.content'
echo ""
# Second message - test memory
echo "2⃣ Second message (testing memory):"
echo "Request: What's my name and what am I learning?"
curl -s -X POST "$BASE_URL/v1/chat/completions" \
-H "Content-Type: application/json" \
-d "{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"user\", \"content\": \"What's my name and what am I learning?\"}
],
\"session_id\": \"$SESSION_ID\"
}" | jq -r '.choices[0].message.content'
echo ""
# Third message - continue conversation
echo "3⃣ Third message (building on context):"
echo "Request: Can you suggest a simple React project for me?"
curl -s -X POST "$BASE_URL/v1/chat/completions" \
-H "Content-Type: application/json" \
-d "{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"user\", \"content\": \"Can you suggest a simple React project for me?\"}
],
\"session_id\": \"$SESSION_ID\"
}" | jq -r '.choices[0].message.content'
echo ""
# Session management examples
echo "🛠 Session Management Examples"
echo "================================"
# List sessions
echo "📋 List all sessions:"
curl -s "$BASE_URL/v1/sessions" | jq .
echo ""
# Get specific session info
echo "🔍 Get session info:"
curl -s "$BASE_URL/v1/sessions/$SESSION_ID" | jq .
echo ""
# Get session stats
echo "📊 Session statistics:"
curl -s "$BASE_URL/v1/sessions/stats" | jq .
echo ""
# Streaming example with session
echo "🌊 Streaming with session continuity:"
echo "Request: Thanks for your help!"
# SSE events arrive as lines of the form "data: {json}". jq cannot parse the
# raw SSE framing, so strip the "data: " prefix first and drop the terminal
# "data: [DONE]" sentinel, which is not JSON. (Previously the unstripped
# lines were piped straight into jq, so every line failed to parse and the
# 2>/dev/null swallowed the errors, printing nothing.)
curl -s -X POST "$BASE_URL/v1/chat/completions" \
-H "Content-Type: application/json" \
-d "{
\"model\": \"claude-3-5-sonnet-20241022\",
\"messages\": [
{\"role\": \"user\", \"content\": \"Thanks for your help!\"}
],
\"session_id\": \"$SESSION_ID\",
\"stream\": true
}" | grep '^data: ' | sed 's/^data: //' | grep -v '^\[DONE\]' | head -5 | jq -r '.choices[0].delta.content // empty' 2>/dev/null | tr -d '\n'
echo ""
echo ""
# Delete session
echo "🧹 Cleaning up session:"
curl -s -X DELETE "$BASE_URL/v1/sessions/$SESSION_ID" | jq .
echo ""
echo "✨ curl session example complete!"
echo ""
echo "💡 Key Points:"
echo " • Include \"session_id\": \"your-session-id\" in request body"
echo " • Same session_id maintains conversation context"
echo " • Works with both streaming and non-streaming requests"
echo " • Use session management endpoints to monitor and control sessions"
echo " • Sessions auto-expire after 1 hour of inactivity"

291
examples/streaming.py Executable file
View file

@ -0,0 +1,291 @@
#!/usr/bin/env python3
"""
Claude Code OpenAI API Wrapper - Advanced Streaming Example

This example demonstrates advanced streaming functionality including
error handling, chunk processing, and real-time display.
"""
from openai import OpenAI
import time
import sys
import os
import requests
from typing import Optional, Generator
import json
def get_api_key(base_url: str = "http://localhost:8000") -> Optional[str]:
    """Resolve the API key to use for the wrapper server.

    Resolution order:
      1. The ``API_KEY`` environment variable, if set.
      2. Ask the server (``GET /v1/auth/status``) whether a key is required:
         not required -> placeholder key; required -> print guidance, None.
      3. If the status check fails or returns an unexpected status, assume
         no authentication and return a fallback placeholder.

    Returns:
        The key string, or ``None`` when the server requires a key that the
        caller has not provided.
    """
    # An explicit user-provided key always wins; fetch it once.
    env_key = os.getenv("API_KEY")
    if env_key:
        return env_key
    # Check server auth status
    try:
        response = requests.get(f"{base_url}/v1/auth/status")
        if response.status_code == 200:
            auth_data = response.json()
            server_info = auth_data.get("server_info", {})
            if not server_info.get("api_key_required", False):
                # No auth required; the SDK still insists on some key string.
                return "no-auth-required"
            # Auth required but no key provided.
            print("⚠️ Server requires API key but none provided.")
            print(" Set API_KEY environment variable with your server's API key")
            print(" Example: API_KEY=your-server-key python streaming.py")
            return None
        # Non-200 reply: say so instead of silently falling back.
        print(f"⚠️ Unexpected auth status response: HTTP {response.status_code}")
    except Exception as e:
        print(f"⚠️ Could not check server auth status: {e}")
        print(" Assuming no authentication required")
    return "fallback-key"
class StreamingClient:
    """Client for handling streaming responses.

    Thin wrapper around an OpenAI SDK client pointed at the Claude Code
    wrapper, with helpers for timed streaming, per-chunk post-processing,
    and running several prompts back to back.
    """
    def __init__(self, base_url: str = "http://localhost:8000/v1", api_key: Optional[str] = None):
        if api_key is None:
            # Auto-detect API key based on server configuration; probe the
            # server root (without the /v1 suffix).
            server_base = base_url.replace("/v1", "")
            api_key = get_api_key(server_base)
            if api_key is None:
                raise ValueError("Server requires API key but none was provided. Set the API_KEY environment variable.")
        self.client = OpenAI(base_url=base_url, api_key=api_key)

    def stream_with_timing(self, messages: list, model: str = "claude-3-5-sonnet-20241022"):
        """Stream a completion and print timing diagnostics.

        Prints time-to-first-token when the first content chunk arrives,
        echoes content as it streams, and on finish prints total time, an
        approximate token count (really a chunk count), and the finish
        reason. KeyboardInterrupt and other errors are reported, not raised,
        so a demo run can continue.
        """
        start_time = time.time()
        first_token_time = None
        token_count = 0  # NOTE: counts content chunks, not true tokens
        print("Streaming response...")
        print("-" * 50)
        try:
            stream = self.client.chat.completions.create(
                model=model,
                messages=messages,
                stream=True
            )
            for chunk in stream:
                if chunk.choices[0].delta.content:
                    if first_token_time is None:
                        # First content chunk marks time-to-first-token.
                        first_token_time = time.time()
                        time_to_first_token = first_token_time - start_time
                        print(f"[Time to first token: {time_to_first_token:.2f}s]\n")
                    content = chunk.choices[0].delta.content
                    print(content, end="", flush=True)
                    token_count += 1
                if chunk.choices[0].finish_reason:
                    total_time = time.time() - start_time
                    print(f"\n\n[Streaming completed]")
                    print(f"[Total time: {total_time:.2f}s]")
                    print(f"[Approximate tokens: {token_count}]")
                    print(f"[Finish reason: {chunk.choices[0].finish_reason}]")
        except KeyboardInterrupt:
            print("\n\n[Streaming interrupted by user]")
        except Exception as e:
            print(f"\n\n[Streaming error: {e}]")

    def stream_with_processing(self, messages: list, process_func=None):
        """Stream a completion, yielding process_func(buffer) per sentence.

        Generator: accumulates streamed content in a buffer and flushes it
        through ``process_func`` whenever the incoming chunk contains
        sentence-ending punctuation; any remainder is flushed at the end.
        """
        if process_func is None:
            process_func = lambda x: x  # Default: no processing
        stream = self.client.chat.completions.create(
            model="claude-3-5-sonnet-20241022",
            messages=messages,
            stream=True
        )
        buffer = ""
        for chunk in stream:
            if chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                buffer += content
                # Process complete sentences. Boundary detection looks at the
                # incoming chunk, so a sentence split across chunks is only
                # flushed on the chunk that carries the punctuation.
                if any(punct in content for punct in ['.', '!', '?', '\n']):
                    processed = process_func(buffer)
                    yield processed
                    buffer = ""
        # Process remaining buffer
        if buffer:
            yield process_func(buffer)

    def parallel_streams(self, prompts: list):
        """Demo of handling multiple prompts (sequential, not truly parallel)."""
        for i, prompt in enumerate(prompts):
            print(f"\n{'='*50}")
            print(f"Prompt {i+1}: {prompt}")
            print('='*50)
            messages = [{"role": "user", "content": prompt}]
            self.stream_with_timing(messages)
            print()
def typing_effect_demo():
    """Print a short streamed story one character at a time."""
    streamer = StreamingClient()
    print("=== Typing Effect Demo ===")
    conversation = [
        {"role": "system", "content": "You are a storyteller."},
        {"role": "user", "content": "Tell me a very short story (2-3 sentences) about a robot learning to paint."},
    ]
    stream = streamer.client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=conversation,
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            for character in delta:
                print(character, end="", flush=True)
                time.sleep(0.05)  # Typing delay
    print("\n")
def word_highlighting_demo():
"""Demonstrate processing stream to highlight specific words."""
client = StreamingClient()
print("=== Word Highlighting Demo ===")
print("(Technical terms will be CAPITALIZED)")
def highlight_technical_terms(text: str) -> str:
"""Highlight technical terms by capitalizing them."""
technical_terms = ['python', 'javascript', 'api', 'function', 'variable',
'class', 'method', 'algorithm', 'data', 'code']
for term in technical_terms:
text = text.replace(term, term.upper())
text = text.replace(term.capitalize(), term.upper())
return text
messages = [
{"role": "user", "content": "Explain what an API is in simple terms."}
]
for processed_chunk in client.stream_with_processing(messages, highlight_technical_terms):
print(processed_chunk, end="", flush=True)
print("\n")
def progress_bar_demo():
    """Accumulate a streamed response while echoing it (progress estimate demo)."""
    streamer = StreamingClient()
    print("=== Progress Bar Demo ===")
    prompt = "Count from 1 to 10, with a brief pause between each number."
    # This is a simple demo - real progress would need token counting
    stream = streamer.client.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    print("Response: ", end="", flush=True)
    collected = []
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            collected.append(delta)
            print(delta, end="", flush=True)
    response_text = "".join(collected)
    print("\n")
def error_recovery_demo():
    """Show recovering from a bad model name by retrying with a valid one."""
    streamer = StreamingClient()
    print("=== Error Recovery Demo ===")
    # This might cause an error if the model doesn't exist
    messages = [{"role": "user", "content": "Hello!"}]

    def relay(model_name):
        # Stream one completion and echo its content chunks.
        stream = streamer.client.chat.completions.create(
            model=model_name,
            messages=messages,
            stream=True,
        )
        for chunk in stream:
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)

    try:
        relay("non-existent-model")
    except Exception as e:
        print(f"Error encountered: {e}")
        print("Retrying with valid model...")
        # Retry with valid model
        relay("claude-3-5-sonnet-20241022")
    print("\n")
def main():
    """Run all streaming demos."""
    streamer = StreamingClient()
    divider = "=" * 70

    # Basic streaming with timing
    print("=== Basic Streaming with Timing ===")
    streamer.stream_with_timing([
        {"role": "user", "content": "Write a one-line Python function to reverse a string."}
    ])
    print("\n" + divider + "\n")

    # Remaining demos, each followed by a divider line.
    for demo in (typing_effect_demo, word_highlighting_demo,
                 progress_bar_demo, error_recovery_demo):
        demo()
        print(divider + "\n")

    # Multiple prompts
    print("=== Multiple Prompts Demo ===")
    streamer.parallel_streams([
        "What is 2+2?",
        "Name a color.",
        "Say 'Hello, World!' in Python.",
    ])


if __name__ == "__main__":
    main()