Configuration¶
Complete configuration guide for Q-Memetic AI system customization, performance tuning, and advanced features.
Configuration Overview¶
Q-Memetic AI can be configured through multiple methods:
- Engine Configuration - Direct Python API configuration
- Environment Variables - System-wide settings
- Configuration Files - JSON/YAML configuration files
- CLI Parameters - Command-line overrides
Engine Configuration¶
Basic Configuration¶
from qmemetic_ai import MemeticEngine
# Minimal configuration
engine = MemeticEngine(
license_key="your-license-key",
data_dir="./my_data"
)
# Basic configuration with options
engine = MemeticEngine(
license_key="your-license-key",
data_dir="./my_data",
federated_mode=False,
config={
"max_memory": 8000, # MB
"log_level": "INFO",
"parallel_workers": 4
}
)
Advanced Configuration¶
# Comprehensive configuration
advanced_config = {
# Performance Settings
"performance": {
"max_memory": 16000, # Maximum memory usage (MB)
"parallel_workers": 8, # Number of parallel workers
"batch_size": 1000, # Batch processing size
"cache_size": 5000, # Memory cache size
"gpu_acceleration": True, # Enable GPU acceleration
"async_operations": True # Enable async operations
},
# Evolution Settings
"evolution": {
"default_population_size": 50,
"default_generations": 10,
"mutation_rate": 0.25,
"crossover_rate": 0.75,
"selection_pressure": 0.6,
"elitism_ratio": 0.1,
"diversity_weight": 0.3
},
# Network Settings
"network": {
"entanglement_threshold": 0.3,
"max_connections_per_meme": 20,
"network_update_interval": 60, # seconds
"clustering_algorithm": "louvain",
"similarity_metric": "cosine"
},
# LLM Settings
"llm": {
"provider": "openai",
"model_name": "gpt-4",
"temperature": 0.7,
"max_tokens": 1000,
"api_key": "your-api-key",
"timeout": 30,
"retry_attempts": 3
},
# Visualization Settings
"visualization": {
"default_layout": "force_directed",
"node_size_range": [10, 100],
"edge_width_range": [1, 10],
"color_scheme": "viridis",
"interactive": True,
"animation_speed": 1.0
},
# Federation Settings
"federation": {
"node_id": "my_research_node",
"discovery_port": 8080,
"sync_interval": 300, # seconds
"max_peers": 10,
"trust_threshold": 0.7,
"encryption_enabled": True
},
# Storage Settings
"storage": {
"compression": True,
"backup_interval": 3600, # seconds
"max_backup_files": 10,
"auto_save": True,
"save_interval": 60 # seconds
},
# Security Settings
"security": {
"encryption_key": "your-encryption-key",
"require_authentication": False,
"audit_logging": True,
"session_timeout": 7200 # seconds
}
}
engine = MemeticEngine(
license_key="your-license-key",
config=advanced_config
)
Environment Variables¶
Core Settings¶
# License and authentication
export QMEMETIC_LICENSE_KEY="your-license-key"
export QMEMETIC_ENCRYPTION_KEY="your-encryption-key"
# Data and storage
export QMEMETIC_DATA_DIR="/path/to/data"
export QMEMETIC_BACKUP_DIR="/path/to/backups"
export QMEMETIC_LOG_DIR="/path/to/logs"
# Performance
export QMEMETIC_MAX_MEMORY="16000"
export QMEMETIC_PARALLEL_WORKERS="8"
export QMEMETIC_BATCH_SIZE="1000"
# Logging
export QMEMETIC_LOG_LEVEL="INFO" # DEBUG, INFO, WARNING, ERROR
export QMEMETIC_LOG_FORMAT="json" # json, text
# Federation
export QMEMETIC_FEDERATION_PORT="8080"
export QMEMETIC_NODE_ID="my_node"
export QMEMETIC_FEDERATION_ENABLED="true"
LLM Integration¶
# OpenAI
export OPENAI_API_KEY="your-openai-key"
export QMEMETIC_LLM_PROVIDER="openai"
export QMEMETIC_LLM_MODEL="gpt-4"
export QMEMETIC_LLM_TEMPERATURE="0.7"
# Alternative providers
export ANTHROPIC_API_KEY="your-anthropic-key"
export QMEMETIC_LLM_PROVIDER="anthropic"
export QMEMETIC_LLM_MODEL="claude-3"
# Local models
export QMEMETIC_LLM_PROVIDER="local"
export QMEMETIC_LLM_ENDPOINT="http://localhost:8000"
export QMEMETIC_LLM_MODEL="llama-7b"
Configuration Files¶
JSON Configuration¶
Create qmemetic_config.json:
{
"license_key": "${QMEMETIC_LICENSE_KEY}",
"data_dir": "./qmemetic_data",
"federated_mode": false,
"performance": {
"max_memory": 8000,
"parallel_workers": 4,
"batch_size": 500,
"gpu_acceleration": false
},
"evolution": {
"default_population_size": 30,
"default_generations": 8,
"mutation_rate": 0.3,
"crossover_rate": 0.7,
"fitness_function": "default"
},
"network": {
"entanglement_threshold": 0.4,
"max_connections_per_meme": 15,
"similarity_metric": "cosine"
},
"llm": {
"provider": "openai",
"model_name": "gpt-3.5-turbo",
"temperature": 0.7,
"max_tokens": 500
},
"visualization": {
"default_layout": "force_directed",
"color_scheme": "plasma",
"interactive": true
}
}
Load configuration:
import json
from qmemetic_ai import MemeticEngine
# Load from JSON file
with open('qmemetic_config.json', 'r') as f:
config = json.load(f)
engine = MemeticEngine(config=config)
YAML Configuration¶
Create qmemetic_config.yaml:
license_key: "${QMEMETIC_LICENSE_KEY}"
data_dir: "./qmemetic_data"
federated_mode: false
performance:
max_memory: 8000
parallel_workers: 4
batch_size: 500
gpu_acceleration: false
evolution:
default_population_size: 30
default_generations: 8
mutation_rate: 0.3
crossover_rate: 0.7
fitness_function: "default"
network:
entanglement_threshold: 0.4
max_connections_per_meme: 15
similarity_metric: "cosine"
llm:
provider: "openai"
model_name: "gpt-3.5-turbo"
temperature: 0.7
max_tokens: 500
visualization:
default_layout: "force_directed"
color_scheme: "plasma"
interactive: true
Load YAML configuration:
import yaml
from qmemetic_ai import MemeticEngine
# Load from YAML file
with open('qmemetic_config.yaml', 'r') as f:
config = yaml.safe_load(f)
engine = MemeticEngine(config=config)
Performance Configuration¶
Memory Optimization¶
# Configure for large datasets
memory_config = {
"performance": {
"max_memory": 32000, # Use more RAM
"memory_growth": True, # Allow dynamic growth
"garbage_collection": True, # Enable aggressive GC
"memory_mapping": True, # Use memory mapping for large files
"compression": True, # Compress data in memory
"cache_strategy": "lru", # Least Recently Used cache
"swap_threshold": 0.8 # Swap to disk at 80% memory
}
}
CPU Optimization¶
# Configure for multi-core processing
cpu_config = {
"performance": {
"parallel_workers": 16, # Match CPU cores
"thread_pool_size": 32, # Thread pool size
"process_pool_size": 8, # Process pool size
"async_batch_size": 1000, # Async operation batch size
"cpu_affinity": [0, 1, 2, 3], # Bind to specific CPU cores
"numa_aware": True # NUMA-aware scheduling
}
}
GPU Acceleration¶
# Configure for GPU acceleration
gpu_config = {
"performance": {
"gpu_acceleration": True,
"gpu_devices": [0, 1], # Use specific GPUs
"gpu_memory_limit": 8000, # GPU memory limit (MB)
"mixed_precision": True, # Use mixed precision
"cuda_optimization": True, # CUDA optimizations
"tensor_cores": True # Use Tensor Cores if available
}
}
Evolution Configuration¶
Genetic Algorithm Parameters¶
evolution_config = {
"evolution": {
# Population parameters
"population_size": 100,
"generations": 20,
"population_dynamics": "steady_state", # or "generational"
# Selection parameters
"selection_method": "tournament", # tournament, roulette, rank
"tournament_size": 3,
"selection_pressure": 0.7,
"elitism_ratio": 0.05,
# Variation parameters
"mutation_rate": 0.2,
"mutation_strength": 0.5,
"crossover_rate": 0.8,
"crossover_type": "uniform", # uniform, single_point, two_point
# Diversity parameters
"diversity_weight": 0.3,
"diversity_metric": "hamming", # hamming, euclidean, cosine
"niching": True,
"speciation_threshold": 0.6,
# Fitness parameters
"fitness_function": "multi_objective", # single, multi_objective, custom
"fitness_weights": {
"semantic_coherence": 0.4,
"novelty": 0.3,
"domain_relevance": 0.2,
"user_preference": 0.1
}
}
}
Custom Fitness Functions¶
def research_fitness(meme, context):
    """Custom fitness function for research content.

    Scores a meme on scientific-rigor keywords, citation markers,
    domain relevance, and a novelty estimate, then clamps the total
    to the [0.0, 1.0] range.

    Args:
        meme: Object with a ``content`` string and a ``metadata.domain``
            attribute (assumed from this snippet -- confirm against the
            real Meme type).
        context: Object exposing ``novelty_calculator.calculate(meme)``,
            expected to return a float.

    Returns:
        float: Fitness score between 0.0 and 1.0.
    """
    score = 0.0

    # Scientific rigor indicators: +0.1 per keyword found.
    # Lowercase the content ONCE -- the original lowered it on every
    # loop iteration, which is loop-invariant work.
    content_lower = meme.content.lower()
    scientific_terms = ("hypothesis", "experiment", "data", "analysis")
    score += sum(0.1 for term in scientific_terms if term in content_lower)

    # Citation and reference indicators (checked against the raw,
    # case-sensitive content, matching the original behavior).
    if "et al" in meme.content or "doi:" in meme.content:
        score += 0.2

    # Domain relevance bonus.
    if meme.metadata.domain in ("science", "research"):
        score += 0.3

    # Novelty (simplified) -- delegated to the context's calculator.
    score += context.novelty_calculator.calculate(meme)

    # Clamp: individual bonuses can sum past 1.0.
    return min(1.0, score)

# Register custom fitness function under the "custom" fitness mode.
fitness_config = {
    "evolution": {
        "fitness_function": "custom",
        "custom_fitness_functions": {
            "research": research_fitness
        }
    }
}
Network Configuration¶
Entanglement Parameters¶
network_config = {
"network": {
# Entanglement calculation
"entanglement_threshold": 0.3,
"similarity_metric": "cosine", # cosine, euclidean, manhattan
"entanglement_decay": 0.95, # Decay factor over time
"max_entanglement_distance": 3, # Maximum hops
# Network topology
"max_connections_per_meme": 20,
"network_density_target": 0.1,
"small_world_factor": 0.3,
"scale_free_exponent": 2.1,
# Dynamic updates
"update_frequency": 60, # seconds
"incremental_updates": True,
"batch_update_size": 1000,
# Community detection
"community_algorithm": "louvain", # louvain, leiden, modularity
"community_resolution": 1.0,
"min_community_size": 5
}
}
Quantum Walk Parameters¶
quantum_config = {
"quantum": {
# Walk parameters
"default_walk_length": 50,
"teleport_probability": 0.15,
"quantum_coherence": 0.8,
"decoherence_rate": 0.05,
# Superposition
"superposition_states": 4,
"entanglement_strength": 0.7,
"measurement_basis": "computational", # computational, hadamard
# Interference
"interference_enabled": True,
"phase_randomization": 0.1,
"amplitude_damping": 0.02
}
}
Federation Configuration¶
Node Configuration¶
federation_config = {
"federation": {
# Node identity
"node_id": "research_lab_alpha",
"node_type": "research", # research, production, edge
"capabilities": ["evolution", "analysis", "visualization"],
# Network settings
"discovery_port": 8080,
"data_port": 8081,
"control_port": 8082,
"bind_address": "0.0.0.0",
# Discovery and peering
"discovery_method": "multicast", # multicast, registry, manual
"registry_url": "https://registry.qmemetic.ai",
"bootstrap_nodes": [
"node1.qmemetic.ai:8080",
"node2.qmemetic.ai:8080"
],
# Synchronization
"sync_interval": 300, # seconds
"sync_strategy": "pull_push", # pull, push, pull_push
"max_sync_batch": 1000,
"sync_timeout": 60,
# Peer management
"max_peers": 10,
"min_peers": 2,
"peer_timeout": 300,
"heartbeat_interval": 30,
# Trust and security
"trust_threshold": 0.7,
"require_signatures": True,
"encryption_enabled": True,
"byzantine_tolerance": 0.33
}
}
Security Configuration¶
security_config = {
"security": {
# Encryption
"encryption_algorithm": "AES-256-GCM",
"key_derivation": "PBKDF2",
"key_iterations": 100000,
# Authentication
"auth_method": "jwt", # jwt, oauth, apikey
"jwt_secret": "your-jwt-secret",
"token_expiry": 3600, # seconds
# Access control
"rbac_enabled": True,
"default_permissions": ["read"],
"admin_users": ["admin@example.com"],
# Audit logging
"audit_enabled": True,
"audit_events": ["create", "evolve", "sync"],
"audit_retention": 90, # days
# Rate limiting
"rate_limit_enabled": True,
"requests_per_minute": 1000,
"burst_limit": 100
}
}
Logging Configuration¶
Basic Logging¶
logging_config = {
"logging": {
"level": "INFO", # DEBUG, INFO, WARNING, ERROR
"format": "json", # json, text, structured
"output": ["console", "file"], # console, file, syslog
# File logging
"log_file": "qmemetic.log",
"max_file_size": "100MB",
"backup_count": 5,
"rotation": "daily", # daily, weekly, size
# Structured logging
"fields": {
"timestamp": "iso8601",
"level": "string",
"logger": "string",
"message": "string",
"context": "object"
}
}
}
Advanced Logging¶
advanced_logging = {
"logging": {
"level": "DEBUG",
"format": "structured",
"output": ["console", "file", "elasticsearch"],
# Performance logging
"performance_logging": True,
"slow_query_threshold": 1.0, # seconds
"memory_tracking": True,
# Component-specific levels
"loggers": {
"qmemetic_ai.evolution": "DEBUG",
"qmemetic_ai.network": "INFO",
"qmemetic_ai.federation": "WARNING"
},
# External integrations
"elasticsearch": {
"hosts": ["localhost:9200"],
"index": "qmemetic-logs"
},
"sentry": {
"dsn": "your-sentry-dsn",
"environment": "production"
}
}
}
Development Configuration¶
Development Mode¶
dev_config = {
"development": {
"debug_mode": True,
"hot_reload": True,
"auto_save": False,
"mock_llm": True, # Use mock LLM for testing
"seed_random": 42, # Reproducible results
"profile_performance": True,
"memory_debugging": True
}
}
Testing Configuration¶
test_config = {
"testing": {
"test_mode": True,
"in_memory_storage": True,
"mock_external_apis": True,
"fast_evolution": True, # Reduced generations
"deterministic": True, # Fixed seeds
"cleanup_after_test": True
}
}
Configuration Validation¶
from qmemetic_ai.config import ConfigValidator
# Validate configuration
validator = ConfigValidator()
validation_result = validator.validate(config)
if not validation_result.is_valid:
print("Configuration errors:")
for error in validation_result.errors:
print(f" - {error}")
else:
print("Configuration is valid")
# Get configuration schema
schema = validator.get_schema()
print("Available configuration options:")
for section, options in schema.items():
print(f"{section}:")
for option, details in options.items():
print(f" {option}: {details['type']} - {details['description']}")
Configuration Best Practices¶
Production Configuration¶
production_config = {
# Security first
"security": {
"encryption_enabled": True,
"audit_logging": True,
"rate_limit_enabled": True
},
# Performance optimization
"performance": {
"max_memory": 16000,
"parallel_workers": 8,
"batch_size": 1000,
"cache_size": 5000
},
# Reliability
"storage": {
"backup_interval": 3600,
"auto_save": True,
"compression": True
},
# Monitoring
"logging": {
"level": "INFO",
"audit_enabled": True,
"performance_logging": True
}
}
Development Configuration¶
development_config = {
# Debug settings
"logging": {
"level": "DEBUG",
"format": "text",
"output": ["console"]
},
# Fast iteration
"evolution": {
"default_population_size": 10,
"default_generations": 3
},
# Testing aids
"development": {
"debug_mode": True,
"mock_llm": True,
"seed_random": 42
}
}
Configuration Templates¶
Save common configurations as templates:
# Save current configuration as template
engine.save_config_template("research_template.json")
# Load template and customize
config = engine.load_config_template("research_template.json")
config["evolution"]["population_size"] = 100
engine.update_config(config)
Next Steps¶
- Performance Guide - Optimize for your workload
- Security Guide - Secure your installation
- API Reference - Complete API documentation
- Troubleshooting - Common issues and solutions