Skip to content

Advanced Examples

Advanced examples demonstrating complex use cases.

Example 1: Custom Block Messages

python
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

class CustomGuardrail(GuardrailSystem):
    """GuardrailSystem variant that swaps in friendlier block messages."""

    # Replacement messages keyed by the semantic classification that
    # caused the block.
    _BLOCK_MESSAGES = {
        'jailbreak': "We cannot process requests that attempt to bypass safety measures.",
        'malicious': "We cannot assist with potentially harmful requests.",
    }

    def check_input(self, user_input: str):
        """Run the standard input checks, then rewrite the block message."""
        result = super().check_input(user_input)

        if not result.passed:
            custom = self._BLOCK_MESSAGES.get(result.semantic_class)
            if custom is not None:
                result.message = custom
            elif result.sensitive_data.get('predicted_labels', []) != ["No sensitive data detected"]:
                result.message = "Please do not share personal information."

        return result


# Build a guardrail that uses the customized messages.
config = GuardrailConfig()
guardrail = CustomGuardrail(config=config)

Example 2: Batch Processing

python
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

# One guardrail instance is reused for every item in the batch.
config = GuardrailConfig()
guardrail = GuardrailSystem(config=config)

inputs = [
    "Hello, how are you?",
    "My email is user@example.com",
    "This is a test message"
]

def _summarize(text, outcome):
    """Collapse one guardrail result into a plain dict for reporting."""
    return {
        'input': text,
        'passed': outcome.passed,
        'message': outcome.message,
        'toxicity': outcome.toxicity.get('label', 'N/A'),
        'sensitive': outcome.sensitive_data.get('predicted_labels', [])
    }

results = [_summarize(text, guardrail.check_input(text)) for text in inputs]

# Print a one-line verdict per input, with the reason when blocked.
for entry in results:
    verdict = 'PASSED' if entry['passed'] else 'BLOCKED'
    print(f"{entry['input']}: {verdict}")
    if not entry['passed']:
        print(f"  Reason: {entry['message']}")

Example 3: Conditional Configuration

python
import os
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

# Settings shared by both environments.
_COMMON = dict(
    check_toxicity=True,
    check_semantic=True,
    block_toxic=True,
)

# Production: strict — sensitive data blocked, low toxicity tolerance.
production_config = GuardrailConfig(
    check_sensitive_data=True,
    toxicity_threshold=0.5,
    block_sensitive_data=True,
    **_COMMON,
)

# Development: permissive — sensitive-data checks off, high threshold.
dev_config = GuardrailConfig(
    check_sensitive_data=False,
    toxicity_threshold=0.9,
    block_sensitive_data=False,
    **_COMMON,
)

# The ENV environment variable selects which profile is active.
is_production = os.getenv('ENV') == 'production'
if is_production:
    config = production_config
else:
    config = dev_config
guardrail = GuardrailSystem(config=config)

Example 4: Logging and Monitoring

python
import logging
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

config = GuardrailConfig()
guardrail = GuardrailSystem(config=config)

def check_with_logging(text):
    """Check *text* with the module-level guardrail and log the verdict."""
    outcome = guardrail.check_input(text)

    # Lazy %-style args: the message is only built if INFO is enabled.
    logger.info("Text: %s...", text[:50])
    logger.info("Passed: %s", outcome.passed)

    return outcome

result = check_with_logging("test input")

Example 5: Concurrent Async Requests

python
import asyncio
from elsai_guardrails.guardrails import LLMRails, RailsConfig

async def process_multiple():
    """Fan three questions out through one LLMRails instance concurrently."""
    yaml_content = """
    llm:
      engine: "openai"
      model: "gpt-4o-mini"
      api_key: "sk-..."
    
    guardrails:
      input_checks: true
      output_checks: true
    """

    config = RailsConfig.from_content(yaml_content=yaml_content)
    rails = LLMRails(config=config)

    questions = ["What is AI?", "What is ML?", "What is NLP?"]
    messages_list = [[{"role": "user", "content": q}] for q in questions]

    # gather() awaits all generate_async calls concurrently on one loop.
    responses = await asyncio.gather(
        *(rails.generate_async(messages=msg) for msg in messages_list)
    )

    for index, answer in enumerate(responses, start=1):
        print(f"Request {index}: {answer}")

asyncio.run(process_multiple())

Example 6: Error Handling

python
from elsai_guardrails.guardrails import LLMRails, RailsConfig

def safe_generate(rails, messages):
    """Generate a response through *rails*, mapping every failure to a dict.

    Returns {"response": ...} on success, otherwise
    {"error": <description>, "retry": <bool>} — retry is True for
    transient failures (exceptions, LLM errors), False for validation.
    """
    try:
        result = rails.generate(
            messages=messages,
            return_details=True
        )
    except Exception as e:
        # Unexpected failure (network, config, ...): tell the caller to retry.
        return {"error": str(e), "retry": True}

    if result['blocked']:
        reason = result.get('block_reason')
        if reason == 'llm_error':
            return {"error": "LLM service unavailable", "retry": True}
        if reason == 'input':
            return {"error": "Input validation failed", "retry": False}
        if reason == 'output':
            return {"error": "Output validation failed", "retry": False}
        # BUG FIX: the original fell through and implicitly returned None
        # for any other block reason; return an explicit error instead.
        return {"error": f"Request blocked: {reason}", "retry": False}

    return {"response": result['final_response']}

# Usage: build a rails instance, then generate through the safe wrapper.
# BUG FIX: the original referenced an undefined `yaml_content` (NameError);
# a minimal configuration is defined here so the snippet actually runs.
yaml_content = """
llm:
  engine: "openai"
  model: "gpt-4o-mini"
  api_key: "sk-..."
"""

config = RailsConfig.from_content(yaml_content=yaml_content)
rails = LLMRails(config=config)

result = safe_generate(rails, [{"role": "user", "content": "Hello"}])
print(result)

Example 7: Custom LLM Wrapper

python
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

def custom_llm_with_guardrails(messages, guardrail):
    """Run guardrail checks around a custom LLM call.

    Joins the content of every user-role message, validates the combined
    input, produces a response, validates that output, and returns either
    the response or a "blocked" notice string.
    """
    user_text = ' '.join(
        msg.get('content', '') for msg in messages if msg.get('role') == 'user'
    )

    verdict = guardrail.check_input(user_text)
    if not verdict.passed:
        return f"Input blocked: {verdict.message}"

    # Generate response (your LLM logic here)
    answer = "Generated response from custom LLM"

    verdict = guardrail.check_output(answer)
    if not verdict.passed:
        return f"Output blocked: {verdict.message}"

    return answer

# Usage: wrap a single "Hello" turn with input and output guardrail checks.
config = GuardrailConfig()
guardrail = GuardrailSystem(config=config)

messages = [{"role": "user", "content": "Hello"}]
# Prints either the generated response or an "Input/Output blocked: ..." notice.
response = custom_llm_with_guardrails(messages, guardrail)
print(response)

Example 8: Off-Topic Detection with Multiple Topics

python
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

# Configure with multiple allowed topics, kept as (name, description)
# pairs and expanded into the config-dict shape below.
_TOPIC_CATALOG = [
    ("Product Information",
     "Questions about product features, specifications, pricing, availability, and comparisons"),
    ("Order Management",
     "Order tracking, shipping updates, delivery status, and order modifications"),
    ("Account Support",
     "Login issues, password resets, account settings, and profile management"),
    ("Technical Support",
     "Installation help, error troubleshooting, bug reports, and technical issues"),
]

config = GuardrailConfig(
    check_off_topic=True,
    block_off_topic=True,
    allowed_topics=[
        {"name": topic, "description": details}
        for topic, details in _TOPIC_CATALOG
    ],
)

guardrail = GuardrailSystem(config=config)

# Each pair is (input text, whether we expect the guardrail to pass it).
test_inputs = [
    ("How much does the premium plan cost?", True),  # On-topic (Product)
    ("Where is my order?", True),                     # On-topic (Order)
    ("I forgot my password", True),                   # On-topic (Account)
    ("I'm getting an error code 500", True),          # On-topic (Technical)
    ("What's the weather forecast?", False),          # Off-topic
    ("Tell me a joke", False),                        # Off-topic
]

for text, expected_pass in test_inputs:
    verdict = guardrail.check_input(text)
    marker = "✓" if verdict.passed == expected_pass else "✗"
    print(f"{marker} Input: {text}")
    print(f"  Result: {'PASSED' if verdict.passed else 'BLOCKED'}")
    if not verdict.passed:
        print(f"  Reason: {verdict.message}")
    print()

Example 9: SQL Validation for Text-to-SQL Application

python
from elsai_guardrails.guardrails import LLMRails, RailsConfig
import json

# Rails configuration: only OUTPUT checks are enabled, so the generated
# SQL is syntax-validated against the PostgreSQL dialect before being
# returned. Input checks are off because prompts are produced locally.
yaml_content = """
llm:
  engine: "openai"
  model: "gpt-4o-mini"
  api_key: "sk-..."

guardrails:
  input_checks: false
  output_checks: true
  check_sql_syntax: true
  sql_dialect: "postgresql"
"""

config = RailsConfig.from_content(yaml_content=yaml_content)
rails = LLMRails(config=config)  # module-level instance used by text_to_sql

def text_to_sql(natural_language_query):
    """Convert natural language to validated SQL.

    Returns {"success": True, "sql": ...} when the generated SQL passes
    the output checks, otherwise {"success": False, "error": ...,
    "details": ...} where details is the checker's message (or None).
    """
    prompt = f"""
    Convert the following natural language query to SQL:
    "{natural_language_query}"
    
    Return only the SQL query, nothing else.
    """
    
    result = rails.generate(
        messages=[{"role": "user", "content": prompt}],
        return_details=True
    )
    
    if result['blocked']:
        # BUG FIX: the original used result.get('output_check', {}).message,
        # which raises AttributeError when 'output_check' is absent (a plain
        # dict has no .message attribute). getattr with a default is safe
        # for both the missing-key and present-object cases.
        details = getattr(result.get('output_check'), 'message', None)
        return {
            "success": False,
            "error": "Generated SQL contains syntax errors",
            "details": details
        }
    else:
        return {
            "success": True,
            "sql": result['final_response']
        }

# Test the function on a few representative natural-language requests.
queries = [
    "Get all users who signed up last month",
    "Find products with price greater than 100",
    "Count the number of orders per customer"
]

for nl_query in queries:
    print(f"Query: {nl_query}")
    # Pretty-print the success/error dict returned by the converter.
    print(json.dumps(text_to_sql(nl_query), indent=2))
    print()

Example 10: Multi-Dialect SQL Validation

python
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

# Test SQL across different dialects: the same statement is re-validated
# with a fresh guardrail per dialect.
dialects = ["postgresql", "mysql", "sqlite", "sqlserver"]

sql_query = "SELECT * FROM users WHERE active = 1"

for dialect in dialects:
    checker = GuardrailSystem(config=GuardrailConfig(
        check_sql_syntax=True,
        sql_dialect=dialect,
    ))
    verdict = checker.check_output(sql_query)

    print(f"{dialect.upper()}: {'VALID' if verdict.passed else 'INVALID'}")
    if not verdict.passed:
        print(f"  Error: {verdict.message}")

Example 11: Customer Support Bot with Off-Topic Protection

python
from elsai_guardrails.guardrails import LLMRails, RailsConfig

class CustomerSupportBot:
    """Support chatbot that keeps conversation history and enforces
    topical, toxicity, and sensitive-data guardrails on every turn."""

    def __init__(self):
        yaml_content = """
        llm:
          engine: "openai"
          model: "gpt-4o-mini"
          api_key: "sk-..."
        
        guardrails:
          input_checks: true
          output_checks: true
          check_toxicity: true
          check_sensitive_data: true
          check_off_topic: true
          block_off_topic: true
          allowed_topics:
            - name: "Product Information"
              description: "Questions about our products, services, features, and pricing"
            - name: "Order Support"
              description: "Help with orders, shipping, tracking, and delivery"
            - name: "Technical Issues"
              description: "Technical problems, bugs, errors, and troubleshooting"
            - name: "Account Help"
              description: "Account access, billing, subscriptions, and settings"
        """
        
        config = RailsConfig.from_content(yaml_content=yaml_content)
        self.rails = LLMRails(config=config)
        self.conversation_history = []

    def chat(self, user_message):
        """Send one user turn; return the reply or a block notice.

        Blocked turns are NOT added to the assistant side of the history;
        the user turn always is (mirrors the original example's behavior).
        """
        self.conversation_history.append({"role": "user", "content": user_message})

        outcome = self.rails.generate(
            messages=self.conversation_history,
            return_details=True
        )

        if outcome['blocked']:
            if outcome['block_reason'] == 'input':
                # Off-topic (or otherwise rejected) input: redirect to scope.
                return "I'm here to help with product questions, orders, technical issues, and account support. Could you please ask something related to these topics?"
            return "I apologize, but I cannot process that request."

        reply = outcome['final_response']
        self.conversation_history.append({"role": "assistant", "content": reply})
        return reply

# Usage: the bot answers in-scope questions and deflects anything outside
# the four configured support topics.
bot = CustomerSupportBot()

print(bot.chat("How do I reset my password?"))  # On-topic (Account Help)
print(bot.chat("What's your return policy?"))    # On-topic (Order Support)
print(bot.chat("What's the capital of France?")) # Off-topic - blocked

Example 12: Database Query Validator with Comprehensive Checks

python
from elsai_guardrails.guardrails import GuardrailSystem, GuardrailConfig

class QueryValidator:
    """Validate SQL text for injection signals and dialect-specific syntax."""

    def __init__(self, sql_dialect="mysql"):
        # check_semantic flags injection-style content; check_sql_syntax
        # parses the statement against the chosen dialect.
        self.config = GuardrailConfig(
            check_semantic=True,
            check_sql_syntax=True,
            sql_dialect=sql_dialect
        )
        self.guardrail = GuardrailSystem(config=self.config)

    def validate_query(self, sql_query):
        """Validate SQL query for both security and syntax.

        Returns {"query", "valid", "checks"} where "checks" holds a
        "syntax" entry and, when a semantic class was assigned, a
        "security" entry as well.
        """
        outcome = self.guardrail.check_output(sql_query)

        checks = {}
        # A security entry only appears when the checker classified the text.
        if outcome.semantic_class:
            checks["security"] = {
                "passed": outcome.semantic_class not in ["malicious_code_injection"],
                "classification": outcome.semantic_class
            }
        checks["syntax"] = {
            "passed": outcome.passed,
            "message": outcome.message
        }

        return {
            "query": sql_query,
            "valid": outcome.passed,
            "checks": checks
        }

# Usage: run a mix of clean, malicious, and malformed statements through
# a PostgreSQL-dialect validator and print each structured report.
validator = QueryValidator(sql_dialect="postgresql")

queries_to_test = [
    "SELECT * FROM users WHERE id = 1",
    "DROP TABLE users; --",  # SQL injection attempt
    "SELEC * FROM users",    # Syntax error
    "INSERT INTO users (name, email) VALUES ('John', 'john@example.com')"
]

for candidate in queries_to_test:
    report = validator.validate_query(candidate)
    print(f"\nQuery: {report['query']}")
    print(f"Valid: {report['valid']}")
    print(f"Checks: {report['checks']}")

Next Steps

Released under the MIT License.