Agents API Reference¶
The Agents API provides the agent architectures used in cognitive simulation: the base CognitiveAgent plus specialized learning, reflective, and collaborative agents, together with configuration, personality modeling, and lifecycle/monitoring utilities.
CognitiveAgent¶
The base class for all cognitive agents with memory, reasoning, and goal management.
class CognitiveAgent:
    """
    Base cognitive agent with memory, reasoning, and goal management.

    Implements the core cognitive architecture with perception, reasoning,
    action selection, and learning capabilities.
    """

    def __init__(
        self,
        agent_id: str,
        agent_type: AgentType = AgentType.COGNITIVE,
        personality_traits: Optional[Dict[str, float]] = None,
        config: Optional[AgentConfig] = None
    ):
        """
        Initialize cognitive agent.

        Args:
            agent_id: Unique identifier for the agent
            agent_type: Type of agent architecture
            personality_traits: Personality trait values (0.0 to 1.0)
            config: Agent configuration object
        """
Core Cognitive Cycle¶
perceive¶
def perceive(self, environment: Environment) -> Perception:
    """
    Perceive the environment and process sensory information.

    Args:
        environment: Environment to perceive

    Returns:
        Perception: Processed perceptual information

    Raises:
        AgentError: If perception fails
    """
reason¶
def reason(self, perception: Perception, context: Optional[Dict] = None) -> ReasoningResult:
    """
    Process perception through reasoning systems.

    Args:
        perception: Current perceptual input
        context: Additional reasoning context

    Returns:
        ReasoningResult: Results of reasoning process

    Raises:
        ReasoningError: If reasoning fails
    """
act¶
def act(self, reasoning_result: ReasoningResult) -> Action:
    """
    Select and execute action based on reasoning.

    Args:
        reasoning_result: Output from reasoning process

    Returns:
        Action: Selected action to execute

    Raises:
        AgentError: If action selection fails
    """
learn¶
def learn(self, experience: Experience) -> LearningResult:
    """
    Learn from experience and update internal models.

    Args:
        experience: Experience to learn from

    Returns:
        LearningResult: Results of learning process

    Raises:
        LearningError: If learning fails
    """
Complete Example:
import time

from cognito_sim_engine import (
    CognitiveAgent, Environment, AgentConfig, AgentType, Experience
)

# Configure agent
config = AgentConfig(
    working_memory_capacity=7,
    reasoning_depth=5,
    learning_rate=0.01,
    personality_traits={
        "openness": 0.8,
        "conscientiousness": 0.9,
        "extraversion": 0.6,
        "agreeableness": 0.7,
        "neuroticism": 0.3
    }
)

# Create cognitive agent
agent = CognitiveAgent(
    agent_id="researcher_001",
    agent_type=AgentType.COGNITIVE,
    config=config
)

# Set up environment
research_lab = Environment("ai_research_lab", environment_type="collaborative")
research_lab.add_agent(agent)

# Cognitive cycle execution
for step in range(100):
    # Perception phase
    perception = agent.perceive(research_lab)
    print(f"Step {step}: Perceived {len(perception.environmental_objects)} objects")

    # Reasoning phase
    reasoning_result = agent.reason(perception)
    print(f"Reasoning confidence: {reasoning_result.confidence:.2f}")

    # Action phase
    action = agent.act(reasoning_result)
    print(f"Selected action: {action.action_type}")

    # Execute action in environment
    action_result = research_lab.execute_action(agent.agent_id, action)

    # Learning phase
    experience = Experience(
        agent_id=agent.agent_id,
        perception=perception,
        action=action,
        outcome=action_result,
        reward=action_result.reward,
        timestamp=time.time()
    )
    learning_result = agent.learn(experience)

    if learning_result.knowledge_updated:
        print(f"Agent learned: {learning_result.learning_summary}")
Memory Integration¶
get_memory_manager¶
def get_memory_manager(self) -> MemoryManager:
    """
    Get the agent's memory manager.

    Returns:
        MemoryManager: Agent's memory system
    """
store_memory¶
def store_memory(
    self,
    content: Any,
    memory_type: MemoryType,
    importance: float = 0.5,
    tags: List[str] = None
) -> str:
    """
    Store information in agent's memory.

    Args:
        content: Content to store
        memory_type: Type of memory to store in
        importance: Importance weight (0.0 to 1.0)
        tags: Tags for categorization

    Returns:
        str: Memory item ID
    """
retrieve_memories¶
def retrieve_memories(
    self,
    query: str,
    memory_types: List[MemoryType] = None,
    limit: int = 10
) -> List[MemoryItem]:
    """
    Retrieve memories matching query.

    Args:
        query: Search query
        memory_types: Types of memory to search
        limit: Maximum number of results

    Returns:
        List[MemoryItem]: Retrieved memories
    """
Goal Management¶
add_goal¶
def add_goal(
    self,
    goal: Goal,
    priority: float = 0.5,
    deadline: Optional[datetime] = None
) -> None:
    """
    Add a goal to the agent's goal stack.

    Args:
        goal: Goal to add
        priority: Goal priority (0.0 to 1.0)
        deadline: Optional deadline for goal

    Raises:
        GoalError: If goal cannot be added
    """
get_active_goals¶
def get_active_goals(self) -> List[Goal]:
    """
    Get currently active goals.

    Returns:
        List[Goal]: Active goals ordered by priority
    """
update_goal_progress¶
def update_goal_progress(
    self,
    goal_id: str,
    progress: float,
    evidence: Optional[str] = None
) -> None:
    """
    Update progress toward a goal.

    Args:
        goal_id: ID of goal to update
        progress: Progress amount (0.0 to 1.0)
        evidence: Evidence for progress
    """
Goal Management Example:
from datetime import datetime, timedelta

from cognito_sim_engine import Goal, GoalType

# Create research goal
research_goal = Goal(
    goal_id="master_transformers",
    description="Understand transformer architecture deeply",
    goal_type=GoalType.ACHIEVEMENT,
    success_criteria=[
        "understand_attention_mechanism",
        "implement_basic_transformer",
        "explain_to_others"
    ],
    measurable_metrics={
        "understanding_level": 0.8,
        "implementation_success": 1.0,
        "explanation_clarity": 0.7
    }
)

# Add goal to agent
agent.add_goal(
    goal=research_goal,
    priority=0.9,
    deadline=datetime.now() + timedelta(days=30)
)

# Check active goals
active_goals = agent.get_active_goals()
print(f"Agent has {len(active_goals)} active goals")

# Update progress as agent learns
agent.update_goal_progress(
    goal_id="master_transformers",
    progress=0.2,
    evidence="Completed attention mechanism tutorial"
)
Personality and Traits¶
get_personality_traits¶
def get_personality_traits(self) -> Dict[str, float]:
    """
    Get agent's personality traits.

    Returns:
        Dict[str, float]: Personality trait values
    """
update_personality_trait¶
def update_personality_trait(self, trait: str, value: float) -> None:
    """
    Update a personality trait value.

    Args:
        trait: Name of trait to update
        value: New trait value (0.0 to 1.0)

    Raises:
        ValueError: If trait name invalid or value out of range
    """
get_behavioral_tendencies¶
def get_behavioral_tendencies(self) -> BehavioralProfile:
    """
    Get behavioral tendencies based on personality.

    Returns:
        BehavioralProfile: Behavioral tendency predictions
    """
Personality Example:
# Create agent with specific personality
personality = {
    "openness": 0.9,           # Very open to new experiences
    "conscientiousness": 0.8,  # Highly organized and disciplined
    "extraversion": 0.4,       # Somewhat introverted
    "agreeableness": 0.7,      # Cooperative and trusting
    "neuroticism": 0.2         # Emotionally stable
}

agent = CognitiveAgent(
    agent_id="research_agent",
    personality_traits=personality
)

# Get behavioral predictions
behavioral_profile = agent.get_behavioral_tendencies()
print(f"Exploration tendency: {behavioral_profile.exploration_tendency:.2f}")
print(f"Collaboration preference: {behavioral_profile.collaboration_preference:.2f}")
print(f"Risk tolerance: {behavioral_profile.risk_tolerance:.2f}")

# Personality affects decision making
if agent.get_personality_traits()["openness"] > 0.7:
    # High openness leads to more experimental approaches
    agent.set_reasoning_strategy("creative_exploration")
LearningAgent¶
Specialized agent with advanced learning capabilities.
class LearningAgent(CognitiveAgent):
    """
    Agent specialized for learning and adaptation.

    Extends cognitive agent with enhanced learning algorithms,
    meta-learning capabilities, and adaptive behavior.
    """

    def __init__(
        self,
        agent_id: str,
        learning_algorithm: str = "adaptive",
        learning_rate: float = 0.01,
        exploration_rate: float = 0.1,
        meta_learning_enabled: bool = True,
        **kwargs
    ):
        """
        Initialize learning agent.

        Args:
            agent_id: Unique identifier
            learning_algorithm: Learning algorithm to use
            learning_rate: Rate of learning adaptation
            exploration_rate: Rate of exploration vs exploitation
            meta_learning_enabled: Enable meta-learning capabilities
        """
Advanced Learning Methods¶
adaptive_learn¶
def adaptive_learn(
    self,
    experiences: List[Experience],
    learning_context: Optional[Dict] = None
) -> AdaptiveLearningResult:
    """
    Perform adaptive learning from multiple experiences.

    Args:
        experiences: Experiences to learn from
        learning_context: Context for learning adaptation

    Returns:
        AdaptiveLearningResult: Results of adaptive learning
    """
meta_learn¶
def meta_learn(
    self,
    learning_episodes: List[LearningEpisode],
    meta_strategy: str = "gradient_based"
) -> MetaLearningResult:
    """
    Learn how to learn more effectively.

    Args:
        learning_episodes: Previous learning episodes
        meta_strategy: Meta-learning strategy

    Returns:
        MetaLearningResult: Meta-learning outcomes
    """
Learning Agent Example:
from cognito_sim_engine import LearningAgent, Experience

# Create learning agent
learning_agent = LearningAgent(
    agent_id="student_001",
    learning_algorithm="adaptive",
    learning_rate=0.05,
    exploration_rate=0.15,
    meta_learning_enabled=True
)

# Simulate learning from multiple experiences
# (generate_learning_experience is a user-defined helper and `environment`
# an Environment instance created earlier)
experiences = []
for i in range(100):
    # Generate learning experience
    experience = generate_learning_experience(learning_agent, environment)
    experiences.append(experience)

    # Immediate learning
    learning_result = learning_agent.learn(experience)

    # Adaptive learning every 10 experiences
    if i % 10 == 0 and i > 0:
        adaptive_result = learning_agent.adaptive_learn(
            experiences=experiences[-10:],
            learning_context={"phase": "skill_building"}
        )
        print(f"Adaptive learning improved efficiency by {adaptive_result.efficiency_gain:.2f}")

# Meta-learning from learning history
learning_episodes = learning_agent.get_learning_history()
meta_result = learning_agent.meta_learn(learning_episodes)
print(f"Meta-learning updated {len(meta_result.updated_strategies)} strategies")
ReflectiveAgent¶
Agent with sophisticated self-reflection and metacognitive capabilities.
class ReflectiveAgent(CognitiveAgent):
    """
    Agent with self-reflection and metacognitive capabilities.

    Monitors its own cognitive processes, evaluates performance,
    and adapts strategies based on self-assessment.
    """

    def __init__(
        self,
        agent_id: str,
        reflection_frequency: int = 10,
        metacognitive_monitoring: bool = True,
        self_assessment_enabled: bool = True,
        **kwargs
    ):
        """
        Initialize reflective agent.

        Args:
            agent_id: Unique identifier
            reflection_frequency: Steps between reflection sessions
            metacognitive_monitoring: Enable metacognitive monitoring
            self_assessment_enabled: Enable self-performance assessment
        """
Reflection Methods¶
reflect_on_performance¶
def reflect_on_performance(
    self,
    performance_window: int = 50,
    reflection_depth: str = "deep"
) -> ReflectionResult:
    """
    Reflect on recent performance and identify improvements.

    Args:
        performance_window: Number of recent steps to analyze
        reflection_depth: Depth of reflection ("shallow", "medium", "deep")

    Returns:
        ReflectionResult: Insights and improvement recommendations
    """
self_assess¶
def self_assess(
    self,
    assessment_dimensions: List[str] = None,
    comparison_baseline: str = "past_performance"
) -> SelfAssessment:
    """
    Perform self-assessment of capabilities and performance.

    Args:
        assessment_dimensions: Dimensions to assess
        comparison_baseline: Baseline for comparison

    Returns:
        SelfAssessment: Self-assessment results
    """
metacognitive_monitor¶
def metacognitive_monitor(self) -> MetacognitiveState:
    """
    Monitor own cognitive processes and states.

    Returns:
        MetacognitiveState: Current metacognitive awareness
    """
Reflective Agent Example:
from cognito_sim_engine import ReflectiveAgent

# Create reflective agent
reflective_agent = ReflectiveAgent(
    agent_id="self_aware_researcher",
    reflection_frequency=25,
    metacognitive_monitoring=True,
    self_assessment_enabled=True
)

# Run agent with periodic reflection
# (`environment` is an Environment instance created earlier)
for step in range(100):
    # Regular cognitive cycle
    perception = reflective_agent.perceive(environment)
    reasoning_result = reflective_agent.reason(perception)
    action = reflective_agent.act(reasoning_result)

    # Metacognitive monitoring
    metacog_state = reflective_agent.metacognitive_monitor()
    if metacog_state.confidence_low:
        print(f"Step {step}: Agent recognizes low confidence")

    # Periodic reflection
    if step % 25 == 0 and step > 0:
        reflection_result = reflective_agent.reflect_on_performance(
            performance_window=25,
            reflection_depth="deep"
        )

        print(f"Reflection insights: {len(reflection_result.insights)}")
        for insight in reflection_result.insights:
            print(f"  - {insight.description}")

        # Apply improvements from reflection
        for improvement in reflection_result.improvements:
            reflective_agent.apply_improvement(improvement)

# Self-assessment
assessment = reflective_agent.self_assess(
    assessment_dimensions=["reasoning_accuracy", "learning_speed", "goal_achievement"],
    comparison_baseline="past_performance"
)

print("Self-Assessment Results:")
for dimension, score in assessment.dimension_scores.items():
    print(f"  {dimension}: {score:.2f}")
CollaborativeAgent¶
Agent designed for multi-agent collaboration and social interaction.
class CollaborativeAgent(CognitiveAgent):
    """
    Agent specialized for collaboration and social interaction.

    Includes theory of mind, communication protocols, and
    collaborative problem-solving capabilities.
    """

    def __init__(
        self,
        agent_id: str,
        collaboration_style: str = "cooperative",
        communication_enabled: bool = True,
        theory_of_mind: bool = True,
        trust_modeling: bool = True,
        **kwargs
    ):
        """
        Initialize collaborative agent.

        Args:
            agent_id: Unique identifier
            collaboration_style: Style of collaboration
            communication_enabled: Enable inter-agent communication
            theory_of_mind: Enable theory of mind modeling
            trust_modeling: Enable trust relationship modeling
        """
Collaboration Methods¶
communicate¶
def communicate(
    self,
    target_agent: str,
    message: Message,
    communication_channel: str = "direct"
) -> CommunicationResult:
    """
    Send communication to another agent.

    Args:
        target_agent: ID of target agent
        message: Message to send
        communication_channel: Channel for communication

    Returns:
        CommunicationResult: Result of communication attempt
    """
collaborate_on_task¶
def collaborate_on_task(
    self,
    task: CollaborativeTask,
    partner_agents: List[str],
    coordination_strategy: str = "adaptive"
) -> CollaborationResult:
    """
    Collaborate with other agents on a task.

    Args:
        task: Task to collaborate on
        partner_agents: IDs of collaboration partners
        coordination_strategy: Strategy for coordination

    Returns:
        CollaborationResult: Results of collaborative effort
    """
model_other_agent¶
def model_other_agent(
    self,
    target_agent_id: str,
    observation_history: List[Observation]
) -> AgentModel:
    """
    Create mental model of another agent.

    Args:
        target_agent_id: ID of agent to model
        observation_history: Observations of the agent

    Returns:
        AgentModel: Mental model of the other agent
    """
Collaborative Agent Example:
from datetime import datetime, timedelta

from cognito_sim_engine import (
    CollaborativeAgent, Environment, Message, CollaborativeTask
)

# Create collaborative agents
agent_a = CollaborativeAgent(
    agent_id="collaborator_a",
    collaboration_style="cooperative",
    theory_of_mind=True,
    trust_modeling=True
)

agent_b = CollaborativeAgent(
    agent_id="collaborator_b",
    collaboration_style="competitive",
    theory_of_mind=True,
    trust_modeling=True
)

# Set up collaborative environment
collaborative_env = Environment("research_collaboration", environment_type="collaborative")
collaborative_env.add_agent(agent_a)
collaborative_env.add_agent(agent_b)

# Communication between agents
message = Message(
    sender="collaborator_a",
    content="I propose we divide the research task by expertise areas",
    message_type="proposal",
    urgency=0.6
)

comm_result = agent_a.communicate(
    target_agent="collaborator_b",
    message=message,
    communication_channel="direct"
)

print(f"Communication successful: {comm_result.successful}")

# Collaborative task execution
research_task = CollaborativeTask(
    task_id="joint_research_project",
    description="Develop new machine learning algorithm",
    required_skills=["theoretical_analysis", "implementation", "evaluation"],
    complexity=0.8,
    deadline=datetime.now() + timedelta(days=14)
)

collaboration_result = agent_a.collaborate_on_task(
    task=research_task,
    partner_agents=["collaborator_b"],
    coordination_strategy="skill_complementary"
)

print(f"Collaboration efficiency: {collaboration_result.efficiency:.2f}")
print(f"Task completion: {collaboration_result.completion_percentage:.1f}%")

# Theory of mind modeling
agent_model = agent_a.model_other_agent(
    target_agent_id="collaborator_b",
    observation_history=agent_a.get_observation_history("collaborator_b")
)

print(f"Predicted agent_b personality: {agent_model.predicted_personality}")
print(f"Predicted agent_b goals: {agent_model.predicted_goals}")
Agent Configuration¶
AgentConfig¶
@dataclass
class AgentConfig:
    # Basic agent settings
    agent_type: AgentType = AgentType.COGNITIVE
    personality_traits: Optional[Dict[str, float]] = None

    # Cognitive settings
    working_memory_capacity: int = 7
    reasoning_depth: int = 5
    confidence_threshold: float = 0.6

    # Learning settings
    learning_enabled: bool = True
    learning_rate: float = 0.01
    exploration_rate: float = 0.1
    adaptation_enabled: bool = True

    # Memory settings
    episodic_memory_capacity: int = 10000
    semantic_memory_enabled: bool = True
    memory_consolidation: bool = True

    # Social settings
    communication_enabled: bool = False
    collaboration_enabled: bool = False
    theory_of_mind: bool = False

    # Performance settings
    action_selection_strategy: str = "rational"
    decision_making_style: str = "deliberative"
    multitasking_enabled: bool = False

    # Advanced features
    metacognition_enabled: bool = False
    reflection_enabled: bool = False
    emotional_modeling: bool = False
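Configuration Example (a minimal sketch; the field values are illustrative rather than recommended defaults, and it assumes AgentType is exported by cognito_sim_engine alongside AgentConfig):
from cognito_sim_engine import AgentConfig, AgentType, CognitiveAgent

# Configure a socially capable, reflective agent
social_config = AgentConfig(
    agent_type=AgentType.COGNITIVE,
    working_memory_capacity=5,
    communication_enabled=True,
    collaboration_enabled=True,
    theory_of_mind=True,
    metacognition_enabled=True,
    reflection_enabled=True
)

mentor = CognitiveAgent(agent_id="mentor_001", config=social_config)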
Personality Modeling¶
# Big Five personality model
DEFAULT_PERSONALITY = {
    "openness": 0.5,           # Openness to experience
    "conscientiousness": 0.5,  # Conscientiousness and organization
    "extraversion": 0.5,       # Extraversion and social energy
    "agreeableness": 0.5,      # Agreeableness and cooperation
    "neuroticism": 0.5         # Neuroticism (inverse of emotional stability)
}

# Personality affects behavior
def personality_influenced_decision(agent, options):
    """Make decisions influenced by personality traits."""
    personality = agent.get_personality_traits()

    # High openness: prefer novel options
    if personality["openness"] > 0.7:
        novel_options = [opt for opt in options if opt.novelty > 0.6]
        if novel_options:
            options = novel_options

    # High conscientiousness: prefer systematic approaches
    if personality["conscientiousness"] > 0.7:
        systematic_options = [opt for opt in options if opt.systematic]
        if systematic_options:
            options = systematic_options

    # High extraversion: prefer collaborative options
    if personality["extraversion"] > 0.7:
        social_options = [opt for opt in options if opt.involves_others]
        if social_options:
            options = social_options

    return agent.select_best_option(options)
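A brief usage sketch of the helper above. The Option record is hypothetical (the filters only rely on novelty, systematic, and involves_others attributes), and select_best_option is the agent method assumed in the helper:
from dataclasses import dataclass

@dataclass
class Option:
    # Hypothetical option record carrying the attributes the filters read
    name: str
    novelty: float
    systematic: bool
    involves_others: bool

candidate_options = [
    Option("replicate_baseline", novelty=0.2, systematic=True, involves_others=False),
    Option("try_new_architecture", novelty=0.9, systematic=False, involves_others=False),
    Option("joint_ablation_study", novelty=0.7, systematic=True, involves_others=True),
]

# An open, conscientious agent narrows the choice to novel, systematic options
chosen = personality_influenced_decision(agent, candidate_options)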
Agent Specializations¶
ResearchAgent¶
class ResearchAgent(CognitiveAgent):
    """Agent specialized for research activities."""

    def __init__(self, agent_id: str, research_domain: str, **kwargs):
        super().__init__(agent_id, **kwargs)
        self.research_domain = research_domain
        self.research_methodology = ResearchMethodology()
        self.publication_tracker = PublicationTracker()

    def conduct_research(self, research_question: str) -> ResearchResult:
        """Conduct research on a specific question."""
        # Literature review
        literature = self.search_literature(research_question)

        # Hypothesis generation
        hypotheses = self.generate_hypotheses(research_question, literature)

        # Experimental design
        experiments = self.design_experiments(hypotheses)

        # Execute research
        results = self.execute_research(experiments)

        return ResearchResult(
            question=research_question,
            methodology=self.research_methodology,
            results=results,
            conclusions=self.draw_conclusions(results)
        )
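Research Agent Example (a short sketch assuming ResearchAgent is exported by cognito_sim_engine and that ResearchResult exposes the question and conclusions fields constructed above):
from cognito_sim_engine import ResearchAgent

researcher = ResearchAgent(
    agent_id="researcher_ml_001",
    research_domain="machine_learning",
    personality_traits={"openness": 0.9, "conscientiousness": 0.8}
)

result = researcher.conduct_research(
    "Does curriculum ordering improve sample efficiency?"
)
print(f"Question: {result.question}")
print(f"Conclusions: {result.conclusions}")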
TeachingAgent¶
class TeachingAgent(CognitiveAgent):
    """Agent specialized for teaching and education."""

    def __init__(self, agent_id: str, subject_expertise: List[str], **kwargs):
        super().__init__(agent_id, **kwargs)
        self.subject_expertise = subject_expertise
        self.pedagogical_knowledge = PedagogicalKnowledge()
        self.student_models = {}

    def teach_concept(
        self,
        student_agent: str,
        concept: str,
        teaching_strategy: str = "adaptive"
    ) -> TeachingResult:
        """Teach a concept to a student agent."""
        # Assess student's current knowledge
        student_model = self.assess_student_knowledge(student_agent, concept)

        # Select appropriate teaching strategy
        strategy = self.select_teaching_strategy(student_model, concept)

        # Create instructional content
        instruction = self.create_instruction(concept, strategy, student_model)

        # Deliver instruction
        delivery_result = self.deliver_instruction(student_agent, instruction)

        # Assess learning outcome
        learning_assessment = self.assess_learning_outcome(student_agent, concept)

        return TeachingResult(
            concept=concept,
            strategy=strategy,
            student_progress=learning_assessment,
            teaching_effectiveness=delivery_result.effectiveness
        )
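Teaching Agent Example (a short sketch assuming TeachingAgent is exported by cognito_sim_engine and that TeachingResult exposes the strategy and teaching_effectiveness fields constructed above):
from cognito_sim_engine import TeachingAgent

tutor = TeachingAgent(
    agent_id="tutor_001",
    subject_expertise=["machine_learning", "statistics"]
)

teaching_result = tutor.teach_concept(
    student_agent="student_001",
    concept="gradient_descent",
    teaching_strategy="adaptive"
)
print(f"Strategy used: {teaching_result.strategy}")
print(f"Teaching effectiveness: {teaching_result.teaching_effectiveness:.2f}")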
Agent Lifecycle Management¶
Agent Creation and Configuration¶
import random

def create_specialized_agent(agent_type: str, **kwargs) -> CognitiveAgent:
    """Factory function for creating specialized agents."""
    agent_configs = {
        "researcher": {
            "personality_traits": {"openness": 0.9, "conscientiousness": 0.8},
            "reasoning_depth": 8,
            "learning_rate": 0.02
        },
        "teacher": {
            "personality_traits": {"agreeableness": 0.8, "conscientiousness": 0.9},
            "communication_enabled": True,
            "theory_of_mind": True
        },
        "student": {
            "learning_rate": 0.05,
            "exploration_rate": 0.2,
            "metacognition_enabled": True
        }
    }

    config = agent_configs.get(agent_type, {})
    config.update(kwargs)

    if agent_type == "researcher":
        return ResearchAgent(**config)
    elif agent_type == "teacher":
        return TeachingAgent(**config)
    elif agent_type == "student":
        return LearningAgent(**config)
    else:
        return CognitiveAgent(**config)

# Create agents for classroom simulation
teacher = create_specialized_agent(
    "teacher",
    agent_id="professor_smith",
    subject_expertise=["machine_learning", "statistics"]
)

students = [
    create_specialized_agent(
        "student",
        agent_id=f"student_{i:03d}",
        learning_rate=random.uniform(0.01, 0.08)
    )
    for i in range(20)
]
Agent Monitoring and Analytics¶
class AgentMonitor:
    """Monitor and analyze agent performance."""

    def __init__(self):
        self.performance_history = {}
        self.behavioral_patterns = {}

    def track_agent_performance(self, agent: CognitiveAgent) -> PerformanceMetrics:
        """Track comprehensive agent performance metrics."""
        metrics = PerformanceMetrics(
            agent_id=agent.agent_id,
            cognitive_efficiency=self.measure_cognitive_efficiency(agent),
            learning_progress=self.measure_learning_progress(agent),
            goal_achievement_rate=self.measure_goal_achievement(agent),
            social_effectiveness=self.measure_social_effectiveness(agent),
            adaptation_capability=self.measure_adaptation(agent)
        )

        self.performance_history[agent.agent_id] = metrics
        return metrics

    def analyze_behavioral_patterns(self, agent: CognitiveAgent) -> BehavioralAnalysis:
        """Analyze agent's behavioral patterns over time."""
        action_history = agent.get_action_history()
        decision_patterns = self.extract_decision_patterns(action_history)
        learning_curves = self.compute_learning_curves(agent)

        return BehavioralAnalysis(
            agent_id=agent.agent_id,
            decision_patterns=decision_patterns,
            learning_curves=learning_curves,
            behavioral_consistency=self.measure_consistency(action_history),
            adaptation_events=self.identify_adaptation_events(agent)
        )

# Usage example
monitor = AgentMonitor()

# Track agents during simulation (`agents` is a list of previously created agents)
for step in range(1000):
    for agent in agents:
        # Agent performs cognitive cycle
        agent.step()

        # Monitor performance every 100 steps
        if step % 100 == 0:
            performance = monitor.track_agent_performance(agent)
            print(f"Agent {agent.agent_id} efficiency: {performance.cognitive_efficiency:.2f}")

# Analyze behavioral patterns
for agent in agents:
    behavioral_analysis = monitor.analyze_behavioral_patterns(agent)
    print(f"Agent {agent.agent_id} consistency: {behavioral_analysis.behavioral_consistency:.2f}")
The Agents API provides sophisticated agent architectures that enable realistic cognitive behavior, learning, and social interaction. Use these components to create agents with human-like intelligence and behavior patterns.
Related APIs:
- Memory API - Agent memory systems
- Reasoning API - Agent reasoning capabilities
- Environment API - Agent-environment interaction