
Guides & Examples

Learn how to integrate perfecX Agent with popular agent frameworks and apply security best practices.

Framework Integration Guides

LangChain Integration

A complete guide to integrating perfecX Agent with LangChain applications.

from langchain.agents import create_react_agent, AgentExecutor
from langchain.tools import Tool
from perfecxion_agent import PerfecXAgent, SecurityPolicy

# Initialize perfecX Agent
px_agent = PerfecXAgent(api_key="your-api-key")

# Define security policy
policy = SecurityPolicy(
    name="langchain_safety",
    allowed_tools=["Search", "Calculator"],  # must match the names of the tools registered below
    max_execution_time=60,
    memory_encryption=True
)

# Create your LangChain agent (llm, prompt, search_func, and calc_func are assumed to be defined elsewhere)
tools = [
    Tool(name="Search", func=search_func, description="Search the web"),
    Tool(name="Calculator", func=calc_func, description="Perform calculations")
]

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)

# Wrap with perfecX Agent security
secure_executor = px_agent.wrap_langchain(
    executor,
    policy=policy,
    monitor_memory=True,
    detect_prompt_injection=True
)

# Use the secure executor
result = secure_executor.invoke({
    "input": "What is the weather in San Francisco?"
})

# View security metrics
metrics = px_agent.get_session_metrics()
print(f"Anomaly score: {metrics.anomaly_score}")
print(f"Tool usage: {metrics.tool_usage}")

Best Practice: Always define explicit tool allowlists in your security policy to prevent unauthorized tool usage.
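
A minimal sketch of how a deny-by-default allowlist can behave at runtime is shown below. It reuses the SecurityPolicy and wrap_langchain calls from the example above; the assumption that a blocked tool call surfaces as a catchable exception is ours, not confirmed SDK behavior.

# Deny-by-default policy: only explicitly trusted tools are allowed
strict_policy = SecurityPolicy(
    name="deny_by_default",
    allowed_tools=["Calculator"],  # "Search" is deliberately left off the allowlist
    max_execution_time=30
)

strict_executor = px_agent.wrap_langchain(executor, policy=strict_policy)

try:
    # This request needs the "Search" tool, which the policy does not allow
    strict_executor.invoke({"input": "Search the web for today's headlines"})
except Exception as err:  # assumed: the wrapper raises when a policy is violated
    print(f"Blocked by policy: {err}")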

AutoGPT Integration

Secure autonomous agents built with AutoGPT.

from autogpt.agents import Agent
from autogpt.config import Config
from perfecxion_agent import PerfecXAgent, MemoryProtection

# Initialize perfecX Agent
px_agent = PerfecXAgent(api_key="your-api-key")

# Configure memory protection
memory_protection = MemoryProtection(
    encrypt_vectors=True,
    sign_memories=True,
    detect_tampering=True,
    max_memory_size_mb=100
)

# Create AutoGPT agent
config = Config()
agent = Agent(
    ai_name="SecureAssistant",
    memory=px_agent.create_protected_memory(memory_protection),
    config=config
)

# Set up continuous monitoring
px_agent.monitor_autogpt(
    agent,
    alert_on_anomaly=True,
    block_suspicious_actions=True,
    log_all_activities=True
)

# Run the agent with protection
agent.start_interaction_loop()
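
Once the interaction loop finishes, you can review what the monitor recorded. This sketch assumes the get_session_metrics() call shown in the LangChain example also covers AutoGPT sessions.

# Review monitoring results after the loop has finished
# (assumes get_session_metrics() applies to AutoGPT sessions as well)
metrics = px_agent.get_session_metrics()
print(f"Anomaly score: {metrics.anomaly_score}")
print(f"Tool usage: {metrics.tool_usage}")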

CrewAI Multi-Agent Integration

Secure multi-agent systems with CrewAI.

from crewai import Agent, Crew, Task
from perfecxion_agent import PerfecXAgent, MultiAgentPolicy

# Initialize perfecX Agent
px_agent = PerfecXAgent(api_key="your-api-key")

# Define multi-agent security policy
policy = MultiAgentPolicy(
    agent_isolation=True,
    secure_communication=True,
    prevent_collusion=True,
    audit_inter_agent_messages=True
)

# Create CrewAI agents with security wrappers
researcher = px_agent.wrap_crewai_agent(
    Agent(
        role='Researcher',
        goal='Find accurate information',
        backstory='Expert at finding reliable sources'
    ),
    agent_id="researcher-001"
)

writer = px_agent.wrap_crewai_agent(
    Agent(
        role='Writer',
        goal='Create compelling content',
        backstory='Skilled content creator'
    ),
    agent_id="writer-001"
)

# Create secure crew (research_task and writing_task are Task objects defined elsewhere)
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task]
)

# Monitor crew execution
with px_agent.monitor_crew(crew, policy=policy):
    result = crew.kickoff()
    
# Analyze multi-agent interactions
analysis = px_agent.analyze_crew_session()
print(f"Inter-agent messages: {analysis.message_count}")
print(f"Security violations: {analysis.violations}")
print(f"Collaboration score: {analysis.collaboration_score}")

Security Best Practices

1. Memory Protection

Protect agent memory from tampering and unauthorized access.

# Enable comprehensive memory protection
px_agent.configure_memory_protection({
    "encryption": {
        "enabled": True,
        "algorithm": "AES-256-GCM",
        "key_rotation_hours": 24
    },
    "signing": {
        "enabled": True,
        "verify_on_read": True,
        "algorithm": "HMAC-SHA256"
    },
    "access_control": {
        "require_authentication": True,
        "audit_all_access": True,
        "rate_limit_reads": 100  # per minute
    }
})

2. Tool Sandboxing

Implement strict controls on tool usage.

# Create sandboxed tool wrapper
@px_agent.sandbox_tool(
    timeout_ms=5000,
    memory_limit_mb=256,
    network_access=False,
    filesystem_access="read_only"
)
def safe_code_executor(code: str) -> str:
    # Delegate to the underlying implementation; execute_code is assumed to be defined elsewhere
    return execute_code(code)

# Register with monitoring
px_agent.register_tool(
    safe_code_executor,
    log_all_executions=True,
    alert_on_timeout=True,
    block_on_violation=True
)
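
Once registered, the sandboxed tool can be exposed to an agent like any other tool and paired with the allowlist approach from the LangChain guide. Wrapping it in a LangChain Tool here is an illustration, not a requirement.

# Expose the sandboxed tool to a LangChain agent and allowlist it explicitly
from langchain.tools import Tool
from perfecxion_agent import SecurityPolicy

code_tool = Tool(
    name="CodeExecutor",
    func=safe_code_executor,
    description="Run short Python snippets inside the sandbox"
)

sandbox_policy = SecurityPolicy(
    name="sandboxed_tools_only",
    allowed_tools=["CodeExecutor"],  # matches the Tool name above
    max_execution_time=30
)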

3. Real-Time Anomaly Detection

Configure behavioral baselines and anomaly thresholds.

# Configure anomaly detection
px_agent.configure_anomaly_detection({
    "behavioral_baseline": {
        "training_period_hours": 24,
        "update_frequency": "hourly",
        "metrics": ["response_time", "tool_usage", "token_count"]
    },
    "thresholds": {
        "anomaly_score": 0.7,
        "consecutive_anomalies": 3,
        "time_window_minutes": 5
    },
    "actions": {
        "on_anomaly": ["log", "alert", "throttle"],
        "on_critical": ["block", "notify_admin", "snapshot_state"]
    }
})
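
To see how these thresholds apply to a single session, compare the session's anomaly score against the value configured above. This reuses the get_session_metrics() call from the LangChain guide; the configured actions (log, alert, throttle, block) still fire automatically.

# Compare a session's anomaly score against the configured threshold (0.7)
metrics = px_agent.get_session_metrics()
if metrics.anomaly_score >= 0.7:
    print(f"Session flagged: anomaly score {metrics.anomaly_score:.2f} exceeds the threshold")
else:
    print(f"Session within baseline: anomaly score {metrics.anomaly_score:.2f}")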

Common Use Cases

Customer Support Bot Protection

Secure customer-facing AI agents handling sensitive data.

# Customer support bot with PII protection
support_agent = px_agent.create_secure_agent(
    base_agent=customer_support_bot,
    config={
        "pii_protection": {
            "detect_pii": True,
            "redact_in_logs": True,
            "block_pii_in_tools": True,
            "pii_types": ["email", "phone", "ssn", "credit_card"]
        },
        "rate_limiting": {
            "requests_per_minute": 30,
            "tokens_per_request": 1000,
            "concurrent_sessions": 100
        },
        "content_filtering": {
            "block_harmful_content": True,
            "detect_prompt_injection": True,
            "sanitize_outputs": True
        }
    }
)

Secure Code Generation

Protect code generation agents from producing malicious code.

# Code generation with security scanning
code_agent = px_agent.create_secure_agent(
    base_agent=code_generator,
    config={
        "code_security": {
            "scan_for_vulnerabilities": True,
            "block_dangerous_patterns": [
                "eval(", "exec(", "__import__",
                "subprocess", "os.system"
            ],
            "require_safe_imports": True,
            "sandbox_execution": True
        },
        "output_validation": {
            "syntax_check": True,
            "dependency_scan": True,
            "license_check": True
        }
    }
)

Research Agent with Source Verification

Ensure research agents use reliable sources and avoid hallucinations.

# Research agent with fact verification
research_agent = px_agent.create_secure_agent(
    base_agent=research_bot,
    config={
        "source_verification": {
            "require_citations": True,
            "verify_urls": True,
            "trusted_domains": ["arxiv.org", "nature.com", "ieee.org"],
            "fact_check_threshold": 0.8
        },
        "hallucination_prevention": {
            "cross_reference_facts": True,
            "confidence_threshold": 0.7,
            "require_multiple_sources": 2
        }
    }
)

Monitoring Dashboard Integration

Access real-time monitoring data programmatically or through our web dashboard.

# Set up dashboard integration
dashboard = px_agent.create_dashboard_client()

# Configure real-time alerts
dashboard.configure_alerts({
    "channels": {
        "email": ["security@company.com"],
        "slack": ["#ai-security"],
        "webhook": ["https://api.company.com/alerts"]
    },
    "triggers": {
        "high_anomaly_score": {
            "threshold": 0.8,
            "channels": ["email", "slack"]
        },
        "policy_violation": {
            "severity": ["critical", "high"],
            "channels": ["email", "slack", "webhook"]
        },
        "memory_tampering": {
            "channels": ["email", "webhook"],
            "include_snapshot": True
        }
    }
})

# Create custom dashboard view
dashboard.create_view(
    name="Production Agents",
    filters={
        "environment": "production",
        "status": "active"
    },
    widgets=[
        "anomaly_timeline",
        "policy_violations",
        "performance_metrics",
        "security_events"
    ]
)

Performance Optimization

Batch Event Processing

Reduce overhead by batching monitoring events.

# Configure batch processing
px_agent.configure_batching({
    "batch_size": 100,
    "flush_interval_ms": 1000,
    "compression": "gzip",
    "retry_on_failure": True
})
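
Because events are buffered until the batch fills or the flush interval elapses, make sure any buffered events are delivered before your process exits. The flush() method below is a hypothetical name used for illustration; check the SDK reference for the actual shutdown hook.

# Deliver any buffered monitoring events before shutdown
# (flush() is a hypothetical method name; see the SDK reference)
import atexit
atexit.register(px_agent.flush)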

Selective Monitoring

Monitor only critical operations to reduce overhead.

# Selective monitoring configuration
px_agent.configure_monitoring({
    "levels": {
        "tool_execution": "critical_only",
        "memory_access": "all",
        "llm_calls": "sample_10_percent"
    },
    "filters": {
        "ignore_tools": ["timestamp", "format_output"],
        "ignore_memory_keys": ["cache_*", "temp_*"]
    }
})