CrewAI Integration
The Surfinguard Python SDK includes a CrewAI integration that adds security checks to CrewAI agent actions, preventing crews from executing dangerous operations.
Installation
pip install surfinguard crewai
Quick Start
from surfinguard import Guard
from surfinguard.integrations.crewai import SurfinguardCrewGuard
from crewai import Agent, Task, Crew
# Create the guard
guard = Guard(api_key="sg_live_your_key_here", policy="moderate")
crew_guard = SurfinguardCrewGuard(guard)
# Define a CrewAI agent
researcher = Agent(
role="Security Researcher",
goal="Analyze URLs for threats",
backstory="You are a security expert.",
tools=[web_search_tool, shell_tool],
)
# Protect the agent
safe_researcher = crew_guard.protect(researcher)
# Use the protected agent in a crew
task = Task(
description="Analyze this list of URLs for phishing indicators",
expected_output="A report of findings",
agent=safe_researcher,
)
crew = Crew(
agents=[safe_researcher],
tasks=[task],
)
result = crew.kickoff()
How It Works
SurfinguardCrewGuard.protect() wraps the agent’s tools with security checks. Before any tool executes, the input is checked against Surfinguard’s threat database. If the action is blocked by the active policy, the tool returns an error message instead of executing.
This is transparent to the agent — it sees the tool failure and can choose to try a different approach.
Protecting Multiple Agents
from surfinguard import Guard
from surfinguard.integrations.crewai import SurfinguardCrewGuard
guard = Guard(api_key="sg_live_...", policy="strict")
crew_guard = SurfinguardCrewGuard(guard)
# Protect all agents
safe_researcher = crew_guard.protect(researcher)
safe_writer = crew_guard.protect(writer)
safe_reviewer = crew_guard.protect(reviewer)
crew = Crew(
agents=[safe_researcher, safe_writer, safe_reviewer],
tasks=[research_task, writing_task, review_task],
)
Custom Configuration
crew_guard = SurfinguardCrewGuard(
guard,
# Map tool names to action types
tool_type_map={
"web_search": "url",
"shell_exec": "command",
"file_read": "file_read",
"sql_query": "query",
},
)
Error Handling
When an agent attempts a dangerous action, the tool returns an error message rather than raising an exception. This allows the CrewAI framework to handle the failure gracefully:
# The agent will see something like:
# "Tool execution blocked by security policy: Destructive command detected (score: 10)"
# And can choose to try a different approach
For programmatic access to blocked actions, check the result directly using the guard's API:
guard = Guard(api_key="sg_live_...", policy="moderate")
# Check the result directly
result = guard.check_command("rm -rf /")
if result.level == "DANGER":
print(f"Would be blocked: {result.reasons}")
Full Example
from surfinguard import Guard
from surfinguard.integrations.crewai import SurfinguardCrewGuard
from crewai import Agent, Task, Crew, Process
from crewai_tools import SerperDevTool, FileReadTool
# Security setup
guard = Guard(api_key="sg_live_...", policy="moderate")
crew_guard = SurfinguardCrewGuard(guard)
# Tools
search_tool = SerperDevTool()
file_tool = FileReadTool()
# Agents
analyst = crew_guard.protect(Agent(
role="Threat Analyst",
goal="Identify potential security threats in provided URLs",
backstory="Expert in cybersecurity threat analysis",
tools=[search_tool],
))
reporter = crew_guard.protect(Agent(
role="Report Writer",
goal="Write clear security reports",
backstory="Technical writer specializing in security documentation",
tools=[file_tool],
))
# Tasks
analysis_task = Task(
description="Analyze these URLs for security threats: {urls}",
expected_output="Detailed threat analysis for each URL",
agent=analyst,
)
report_task = Task(
description="Write a security report based on the analysis",
expected_output="A formatted security report",
agent=reporter,
)
# Crew
crew = Crew(
agents=[analyst, reporter],
tasks=[analysis_task, report_task],
process=Process.sequential,
)
result = crew.kickoff(
inputs={"urls": "https://example.com, https://g00gle-login.tk"}
)
print(result)