Merge branch 'internal'
All checks were successful
Build docker and publish / build (20.15.1) (push) Successful in 7m24s

# Conflicts:
#	internal/logic/public/order/getDiscount.go
#	queue/logic/order/activateOrderLogic.go
This commit is contained in:
shanshanzhong 2026-03-31 00:48:05 -07:00
commit 6724e341b8
171 changed files with 27555 additions and 777 deletions

View File

@ -0,0 +1,550 @@
---
name: "AgentDB Advanced Features"
description: "Master advanced AgentDB features including QUIC synchronization, multi-database management, custom distance metrics, hybrid search, and distributed systems integration. Use when building distributed AI systems, multi-agent coordination, or advanced vector search applications."
---
# AgentDB Advanced Features
## What This Skill Does
Covers advanced AgentDB capabilities for distributed systems, multi-database coordination, custom distance metrics, hybrid search (vector + metadata), QUIC synchronization, and production deployment patterns. Enables building sophisticated AI systems with sub-millisecond cross-node communication and advanced search capabilities.
**Performance**: <1ms QUIC sync, hybrid search with filters, custom distance metrics.
## Prerequisites
- Node.js 18+
- AgentDB v1.0.7+ (via agentic-flow)
- Understanding of distributed systems (for QUIC sync)
- Vector search fundamentals
---
## QUIC Synchronization
### What is QUIC Sync?
QUIC (Quick UDP Internet Connections) enables sub-millisecond latency synchronization between AgentDB instances across network boundaries with automatic retry, multiplexing, and encryption.
**Benefits**:
- <1ms latency between nodes
- Multiplexed streams (multiple operations simultaneously)
- Built-in encryption (TLS 1.3)
- Automatic retry and recovery
- Event-based broadcasting
### Enable QUIC Sync
```typescript
import { createAgentDBAdapter } from 'agentic-flow/reasoningbank';
// Initialize with QUIC synchronization
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/distributed.db',
enableQUICSync: true,
syncPort: 4433,
syncPeers: [
'192.168.1.10:4433',
'192.168.1.11:4433',
'192.168.1.12:4433',
],
});
// Patterns automatically sync across all peers
await adapter.insertPattern({
// ... pattern data
});
// Available on all peers within ~1ms
```
### QUIC Configuration
```typescript
const adapter = await createAgentDBAdapter({
enableQUICSync: true,
syncPort: 4433, // QUIC server port
syncPeers: ['host1:4433'], // Peer addresses
syncInterval: 1000, // Sync interval (ms)
syncBatchSize: 100, // Patterns per batch
maxRetries: 3, // Retry failed syncs
compression: true, // Enable compression
});
```
### Multi-Node Deployment
```bash
# Node 1 (192.168.1.10)
AGENTDB_QUIC_SYNC=true \
AGENTDB_QUIC_PORT=4433 \
AGENTDB_QUIC_PEERS=192.168.1.11:4433,192.168.1.12:4433 \
node server.js
# Node 2 (192.168.1.11)
AGENTDB_QUIC_SYNC=true \
AGENTDB_QUIC_PORT=4433 \
AGENTDB_QUIC_PEERS=192.168.1.10:4433,192.168.1.12:4433 \
node server.js
# Node 3 (192.168.1.12)
AGENTDB_QUIC_SYNC=true \
AGENTDB_QUIC_PORT=4433 \
AGENTDB_QUIC_PEERS=192.168.1.10:4433,192.168.1.11:4433 \
node server.js
```
---
## Distance Metrics
### Cosine Similarity (Default)
Best for normalized vectors, semantic similarity:
```bash
# CLI
npx agentdb@latest query ./vectors.db "[0.1,0.2,...]" -m cosine
# API
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
metric: 'cosine',
k: 10,
});
```
**Use Cases**:
- Text embeddings (BERT, GPT, etc.)
- Semantic search
- Document similarity
- Most general-purpose applications
**Formula**: `cos(θ) = (A · B) / (||A|| × ||B||)`
**Range**: [-1, 1] (1 = identical, -1 = opposite)
### Euclidean Distance (L2)
Best for spatial data, geometric similarity:
```bash
# CLI
npx agentdb@latest query ./vectors.db "[0.1,0.2,...]" -m euclidean
# API
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
metric: 'euclidean',
k: 10,
});
```
**Use Cases**:
- Image embeddings
- Spatial data
- Computer vision
- When vector magnitude matters
**Formula**: `d = √(Σ(ai - bi)²)`
**Range**: [0, ∞) (0 = identical; larger = more different)
### Dot Product
Best for pre-normalized vectors, fast computation:
```bash
# CLI
npx agentdb@latest query ./vectors.db "[0.1,0.2,...]" -m dot
# API
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
metric: 'dot',
k: 10,
});
```
**Use Cases**:
- Pre-normalized embeddings
- Fast similarity computation
- When vectors are already unit-length
**Formula**: `dot = Σ(ai × bi)`
**Range**: (-∞, ∞) (higher = more similar)
### Custom Distance Metrics
```typescript
// Implement custom distance function
function customDistance(vec1: number[], vec2: number[]): number {
// Weighted Euclidean distance
const weights = [1.0, 2.0, 1.5, ...];
let sum = 0;
for (let i = 0; i < vec1.length; i++) {
sum += weights[i] * Math.pow(vec1[i] - vec2[i], 2);
}
return Math.sqrt(sum);
}
// Use in search (requires custom implementation)
```
---
## Hybrid Search (Vector + Metadata)
### Basic Hybrid Search
Combine vector similarity with metadata filtering:
```typescript
// Store documents with metadata
await adapter.insertPattern({
id: '',
type: 'document',
domain: 'research-papers',
pattern_data: JSON.stringify({
embedding: documentEmbedding,
text: documentText,
metadata: {
author: 'Jane Smith',
year: 2025,
category: 'machine-learning',
citations: 150,
}
}),
confidence: 1.0,
usage_count: 0,
success_count: 0,
created_at: Date.now(),
last_used: Date.now(),
});
// Hybrid search: vector similarity + metadata filters
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'research-papers',
k: 20,
filters: {
year: { $gte: 2023 }, // Published 2023 or later
category: 'machine-learning', // ML papers only
citations: { $gte: 50 }, // Highly cited
},
});
```
### Advanced Filtering
```typescript
// Complex metadata queries
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'products',
k: 50,
filters: {
price: { $gte: 10, $lte: 100 }, // Price range
category: { $in: ['electronics', 'gadgets'] }, // Multiple categories
rating: { $gte: 4.0 }, // High rated
inStock: true, // Available
tags: { $contains: 'wireless' }, // Has tag
},
});
```
### Weighted Hybrid Search
Combine vector and metadata scores:
```typescript
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'content',
k: 20,
hybridWeights: {
vectorSimilarity: 0.7, // 70% weight on semantic similarity
metadataScore: 0.3, // 30% weight on metadata match
},
filters: {
category: 'technology',
recency: { $gte: Date.now() - 30 * 24 * 3600000 }, // Last 30 days
},
});
```
---
## Multi-Database Management
### Multiple Databases
```typescript
// Separate databases for different domains
const knowledgeDB = await createAgentDBAdapter({
dbPath: '.agentdb/knowledge.db',
});
const conversationDB = await createAgentDBAdapter({
dbPath: '.agentdb/conversations.db',
});
const codeDB = await createAgentDBAdapter({
dbPath: '.agentdb/code.db',
});
// Use appropriate database for each task
await knowledgeDB.insertPattern({ /* knowledge */ });
await conversationDB.insertPattern({ /* conversation */ });
await codeDB.insertPattern({ /* code */ });
```
### Database Sharding
```typescript
// Shard by domain for horizontal scaling
const shards = {
'domain-a': await createAgentDBAdapter({ dbPath: '.agentdb/shard-a.db' }),
'domain-b': await createAgentDBAdapter({ dbPath: '.agentdb/shard-b.db' }),
'domain-c': await createAgentDBAdapter({ dbPath: '.agentdb/shard-c.db' }),
};
// Route queries to appropriate shard
function getDBForDomain(domain: string) {
const shardKey = domain.split('-').slice(0, 2).join('-'); // e.g. 'domain-a-task' -> 'domain-a'
return shards[shardKey] || shards['domain-a'];
}
// Insert to correct shard
const db = getDBForDomain('domain-a-task');
await db.insertPattern({ /* ... */ });
```
---
## MMR (Maximal Marginal Relevance)
Retrieve diverse results to avoid redundancy:
```typescript
// Without MMR: Similar results may be redundant
const standardResults = await adapter.retrieveWithReasoning(queryEmbedding, {
k: 10,
useMMR: false,
});
// With MMR: Diverse, non-redundant results
const diverseResults = await adapter.retrieveWithReasoning(queryEmbedding, {
k: 10,
useMMR: true,
mmrLambda: 0.5, // Balance relevance (0) vs diversity (1)
});
```
**MMR Parameters**:
- `mmrLambda = 0`: Maximum relevance (may be redundant)
- `mmrLambda = 0.5`: Balanced (default)
- `mmrLambda = 1`: Maximum diversity (may be less relevant)
**Use Cases**:
- Search result diversification
- Recommendation systems
- Avoiding echo chambers
- Exploratory search
---
## Context Synthesis
Generate rich context from multiple memories:
```typescript
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'problem-solving',
k: 10,
synthesizeContext: true, // Enable context synthesis
});
// ContextSynthesizer creates coherent narrative
console.log('Synthesized Context:', result.context);
// "Based on 10 similar problem-solving attempts, the most effective
// approach involves: 1) analyzing root cause, 2) brainstorming solutions,
// 3) evaluating trade-offs, 4) implementing incrementally. Success rate: 85%"
console.log('Patterns:', result.patterns);
// Extracted common patterns across memories
```
---
## Production Patterns
### Connection Pooling
```typescript
// Singleton pattern for shared adapter
class AgentDBPool {
private static instance: AgentDBAdapter;
static async getInstance() {
if (!this.instance) {
this.instance = await createAgentDBAdapter({
dbPath: '.agentdb/production.db',
quantizationType: 'scalar',
cacheSize: 2000,
});
}
return this.instance;
}
}
// Use in application
const db = await AgentDBPool.getInstance();
const results = await db.retrieveWithReasoning(queryEmbedding, { k: 10 });
```
### Error Handling
```typescript
async function safeRetrieve(queryEmbedding: number[], options: any) {
try {
const result = await adapter.retrieveWithReasoning(queryEmbedding, options);
return result;
} catch (error) {
if (error.code === 'DIMENSION_MISMATCH') {
console.error('Query embedding dimension mismatch');
// Handle dimension error
} else if (error.code === 'DATABASE_LOCKED') {
// Retry with exponential backoff
await new Promise(resolve => setTimeout(resolve, 100));
return safeRetrieve(queryEmbedding, options);
}
throw error;
}
}
```
### Monitoring and Logging
```typescript
// Performance monitoring
const startTime = Date.now();
const result = await adapter.retrieveWithReasoning(queryEmbedding, { k: 10 });
const latency = Date.now() - startTime;
if (latency > 100) {
console.warn('Slow query detected:', latency, 'ms');
}
// Log statistics
const stats = await adapter.getStats();
console.log('Database Stats:', {
totalPatterns: stats.totalPatterns,
dbSize: stats.dbSize,
cacheHitRate: stats.cacheHitRate,
avgSearchLatency: stats.avgSearchLatency,
});
```
---
## CLI Advanced Operations
### Database Import/Export
```bash
# Export with compression
npx agentdb@latest export ./vectors.db ./backup.json.gz --compress
# Import from backup
npx agentdb@latest import ./backup.json.gz --decompress
# Merge databases
npx agentdb@latest merge ./db1.sqlite ./db2.sqlite ./merged.sqlite
```
### Database Optimization
```bash
# Vacuum database (reclaim space)
sqlite3 .agentdb/vectors.db "VACUUM;"
# Analyze for query optimization
sqlite3 .agentdb/vectors.db "ANALYZE;"
# Rebuild indices
npx agentdb@latest reindex ./vectors.db
```
---
## Environment Variables
```bash
# AgentDB configuration
AGENTDB_PATH=.agentdb/reasoningbank.db
AGENTDB_ENABLED=true
# Performance tuning
AGENTDB_QUANTIZATION=binary # binary|scalar|product|none
AGENTDB_CACHE_SIZE=2000
AGENTDB_HNSW_M=16
AGENTDB_HNSW_EF=100
# Learning plugins
AGENTDB_LEARNING=true
# Reasoning agents
AGENTDB_REASONING=true
# QUIC synchronization
AGENTDB_QUIC_SYNC=true
AGENTDB_QUIC_PORT=4433
AGENTDB_QUIC_PEERS=host1:4433,host2:4433
```
---
## Troubleshooting
### Issue: QUIC sync not working
```bash
# Check firewall allows UDP port 4433
sudo ufw allow 4433/udp
# Verify peers are reachable
ping host1
# Check QUIC logs
DEBUG=agentdb:quic node server.js
```
### Issue: Hybrid search returns no results
```typescript
// Relax filters
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
k: 100, // Increase k
filters: {
// Remove or relax filters
},
});
```
### Issue: Memory consolidation too aggressive
```typescript
// Disable automatic optimization
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
optimizeMemory: false, // Disable auto-consolidation
k: 10,
});
```
---
## Learn More
- **QUIC Protocol**: docs/quic-synchronization.pdf
- **Hybrid Search**: docs/hybrid-search-guide.md
- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb
- **Website**: https://agentdb.ruv.io
---
**Category**: Advanced / Distributed Systems
**Difficulty**: Advanced
**Estimated Time**: 45-60 minutes

View File

@ -0,0 +1,545 @@
---
name: "AgentDB Learning Plugins"
description: "Create and train AI learning plugins with AgentDB's 9 reinforcement learning algorithms. Includes Decision Transformer, Q-Learning, SARSA, Actor-Critic, and more. Use when building self-learning agents, implementing RL, or optimizing agent behavior through experience."
---
# AgentDB Learning Plugins
## What This Skill Does
Provides access to 9 reinforcement learning algorithms via AgentDB's plugin system. Create, train, and deploy learning plugins for autonomous agents that improve through experience. Includes offline RL (Decision Transformer), value-based learning (Q-Learning), policy gradients (Actor-Critic), and advanced techniques.
**Performance**: Train models 10-100x faster with WASM-accelerated neural inference.
## Prerequisites
- Node.js 18+
- AgentDB v1.0.7+ (via agentic-flow)
- Basic understanding of reinforcement learning (recommended)
---
## Quick Start with CLI
### Create Learning Plugin
```bash
# Interactive wizard
npx agentdb@latest create-plugin
# Use specific template
npx agentdb@latest create-plugin -t decision-transformer -n my-agent
# Preview without creating
npx agentdb@latest create-plugin -t q-learning --dry-run
# Custom output directory
npx agentdb@latest create-plugin -t actor-critic -o ./plugins
```
### List Available Templates
```bash
# Show all plugin templates
npx agentdb@latest list-templates
# Available templates:
# - decision-transformer (sequence modeling RL - recommended)
# - q-learning (value-based learning)
# - sarsa (on-policy TD learning)
# - actor-critic (policy gradient with baseline)
# - curiosity-driven (exploration-based)
```
### Manage Plugins
```bash
# List installed plugins
npx agentdb@latest list-plugins
# Get plugin information
npx agentdb@latest plugin-info my-agent
# Shows: algorithm, configuration, training status
```
---
## Quick Start with API
```typescript
import { createAgentDBAdapter } from 'agentic-flow/reasoningbank';
// Initialize with learning enabled
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/learning.db',
enableLearning: true, // Enable learning plugins
enableReasoning: true,
cacheSize: 1000,
});
// Store training experience
await adapter.insertPattern({
id: '',
type: 'experience',
domain: 'game-playing',
pattern_data: JSON.stringify({
embedding: await computeEmbedding('state-action-reward'),
pattern: {
state: [0.1, 0.2, 0.3],
action: 2,
reward: 1.0,
next_state: [0.15, 0.25, 0.35],
done: false
}
}),
confidence: 0.9,
usage_count: 1,
success_count: 1,
created_at: Date.now(),
last_used: Date.now(),
});
// Train learning model
const metrics = await adapter.train({
epochs: 50,
batchSize: 32,
});
console.log('Training Loss:', metrics.loss);
console.log('Duration:', metrics.duration, 'ms');
```
---
## Available Learning Algorithms (9 Total)
### 1. Decision Transformer (Recommended)
**Type**: Offline Reinforcement Learning
**Best For**: Learning from logged experiences, imitation learning
**Strengths**: No online interaction needed, stable training
```bash
npx agentdb@latest create-plugin -t decision-transformer -n dt-agent
```
**Use Cases**:
- Learn from historical data
- Imitation learning from expert demonstrations
- Safe learning without environment interaction
- Sequence modeling tasks
**Configuration**:
```json
{
"algorithm": "decision-transformer",
"model_size": "base",
"context_length": 20,
"embed_dim": 128,
"n_heads": 8,
"n_layers": 6
}
```
### 2. Q-Learning
**Type**: Value-Based RL (Off-Policy)
**Best For**: Discrete action spaces, sample efficiency
**Strengths**: Proven, simple, works well for small/medium problems
```bash
npx agentdb@latest create-plugin -t q-learning -n q-agent
```
**Use Cases**:
- Grid worlds, board games
- Navigation tasks
- Resource allocation
- Discrete decision-making
**Configuration**:
```json
{
"algorithm": "q-learning",
"learning_rate": 0.001,
"gamma": 0.99,
"epsilon": 0.1,
"epsilon_decay": 0.995
}
```
### 3. SARSA
**Type**: Value-Based RL (On-Policy)
**Best For**: Safe exploration, risk-sensitive tasks
**Strengths**: More conservative than Q-Learning, better for safety
```bash
npx agentdb@latest create-plugin -t sarsa -n sarsa-agent
```
**Use Cases**:
- Safety-critical applications
- Risk-sensitive decision-making
- Online learning with exploration
**Configuration**:
```json
{
"algorithm": "sarsa",
"learning_rate": 0.001,
"gamma": 0.99,
"epsilon": 0.1
}
```
### 4. Actor-Critic
**Type**: Policy Gradient with Value Baseline
**Best For**: Continuous actions, variance reduction
**Strengths**: Stable, works for continuous/discrete actions
```bash
npx agentdb@latest create-plugin -t actor-critic -n ac-agent
```
**Use Cases**:
- Continuous control (robotics, simulations)
- Complex action spaces
- Multi-agent coordination
**Configuration**:
```json
{
"algorithm": "actor-critic",
"actor_lr": 0.001,
"critic_lr": 0.002,
"gamma": 0.99,
"entropy_coef": 0.01
}
```
### 5. Active Learning
**Type**: Query-Based Learning
**Best For**: Label-efficient learning, human-in-the-loop
**Strengths**: Minimizes labeling cost, focuses on uncertain samples
**Use Cases**:
- Human feedback incorporation
- Label-efficient training
- Uncertainty sampling
- Annotation cost reduction
### 6. Adversarial Training
**Type**: Robustness Enhancement
**Best For**: Safety, robustness to perturbations
**Strengths**: Improves model robustness, adversarial defense
**Use Cases**:
- Security applications
- Robust decision-making
- Adversarial defense
- Safety testing
### 7. Curriculum Learning
**Type**: Progressive Difficulty Training
**Best For**: Complex tasks, faster convergence
**Strengths**: Stable learning, faster convergence on hard tasks
**Use Cases**:
- Complex multi-stage tasks
- Hard exploration problems
- Skill composition
- Transfer learning
### 8. Federated Learning
**Type**: Distributed Learning
**Best For**: Privacy, distributed data
**Strengths**: Privacy-preserving, scalable
**Use Cases**:
- Multi-agent systems
- Privacy-sensitive data
- Distributed training
- Collaborative learning
### 9. Multi-Task Learning
**Type**: Transfer Learning
**Best For**: Related tasks, knowledge sharing
**Strengths**: Faster learning on new tasks, better generalization
**Use Cases**:
- Task families
- Transfer learning
- Domain adaptation
- Meta-learning
---
## Training Workflow
### 1. Collect Experiences
```typescript
// Store experiences during agent execution
for (let i = 0; i < numEpisodes; i++) {
const episode = runEpisode();
for (const step of episode.steps) {
await adapter.insertPattern({
id: '',
type: 'experience',
domain: 'task-domain',
pattern_data: JSON.stringify({
embedding: await computeEmbedding(JSON.stringify(step)),
pattern: {
state: step.state,
action: step.action,
reward: step.reward,
next_state: step.next_state,
done: step.done
}
}),
confidence: step.reward > 0 ? 0.9 : 0.5,
usage_count: 1,
success_count: step.reward > 0 ? 1 : 0,
created_at: Date.now(),
last_used: Date.now(),
});
}
}
```
### 2. Train Model
```typescript
// Train on collected experiences
const trainingMetrics = await adapter.train({
epochs: 100,
batchSize: 64,
learningRate: 0.001,
validationSplit: 0.2,
});
console.log('Training Metrics:', trainingMetrics);
// {
// loss: 0.023,
// valLoss: 0.028,
// duration: 1523,
// epochs: 100
// }
```
### 3. Evaluate Performance
```typescript
// Retrieve similar successful experiences
const testQuery = await computeEmbedding(JSON.stringify(testState));
const result = await adapter.retrieveWithReasoning(testQuery, {
domain: 'task-domain',
k: 10,
synthesizeContext: true,
});
// Evaluate action quality
const suggestedAction = result.memories[0].pattern.action;
const confidence = result.memories[0].similarity;
console.log('Suggested Action:', suggestedAction);
console.log('Confidence:', confidence);
```
---
## Advanced Training Techniques
### Experience Replay
```typescript
// Store experiences in buffer
const replayBuffer = [];
// Sample a random batch of 32 experiences for training
const batch = sampleRandomBatch(replayBuffer, 32);
// Train on batch
await adapter.train({
data: batch,
epochs: 1,
batchSize: 32,
});
```
### Prioritized Experience Replay
```typescript
// Store experiences with priority (TD error)
await adapter.insertPattern({
// ... standard fields
confidence: tdError, // Use TD error as confidence/priority
// ...
});
// Retrieve high-priority experiences
const highPriority = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'task-domain',
k: 32,
minConfidence: 0.7, // Only high TD-error experiences
});
```
### Multi-Agent Training
```typescript
// Collect experiences from multiple agents
for (const agent of agents) {
const experience = await agent.step();
await adapter.insertPattern({
// ... store experience with agent ID
domain: `multi-agent/${agent.id}`,
});
}
// Train shared model
await adapter.train({
epochs: 50,
batchSize: 64,
});
```
---
## Performance Optimization
### Batch Training
```typescript
// Collect a batch of 1000 experiences
const experiences = collectBatch(1000);
// Insert experiences (prefer a batch-insert API where available — up to 500x faster than per-item inserts)
for (const exp of experiences) {
await adapter.insertPattern({ /* ... */ });
}
// Train on batch
await adapter.train({
epochs: 10,
batchSize: 128, // Larger batch for efficiency
});
```
### Incremental Learning
```typescript
// Train incrementally as new data arrives
setInterval(async () => {
const newExperiences = getNewExperiences();
if (newExperiences.length > 100) {
await adapter.train({
epochs: 5,
batchSize: 32,
});
}
}, 60000); // Every minute
```
---
## Integration with Reasoning Agents
Combine learning with reasoning for better performance:
```typescript
// Train learning model
await adapter.train({ epochs: 50, batchSize: 32 });
// Use reasoning agents for inference
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'decision-making',
k: 10,
useMMR: true, // Diverse experiences
synthesizeContext: true, // Rich context
optimizeMemory: true, // Consolidate patterns
});
// Make decision based on learned experiences + reasoning
const decision = result.context.suggestedAction;
const confidence = result.memories[0].similarity;
```
---
## CLI Operations
```bash
# Create plugin
npx agentdb@latest create-plugin -t decision-transformer -n my-plugin
# List plugins
npx agentdb@latest list-plugins
# Get plugin info
npx agentdb@latest plugin-info my-plugin
# List templates
npx agentdb@latest list-templates
```
---
## Troubleshooting
### Issue: Training not converging
```typescript
// Reduce learning rate
await adapter.train({
epochs: 100,
batchSize: 32,
learningRate: 0.0001, // Lower learning rate
});
```
### Issue: Overfitting
```typescript
// Use validation split
await adapter.train({
epochs: 50,
batchSize: 64,
validationSplit: 0.2, // 20% validation
});
// Enable memory optimization
await adapter.retrieveWithReasoning(queryEmbedding, {
optimizeMemory: true, // Consolidate, reduce overfitting
});
```
### Issue: Slow training
```bash
# Enable quantization for faster inference
# Use binary quantization (32x faster)
```
---
## Learn More
- **Algorithm Papers**: See docs/algorithms/ for detailed papers
- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb
- **MCP Integration**: `npx agentdb@latest mcp`
- **Website**: https://agentdb.ruv.io
---
**Category**: Machine Learning / Reinforcement Learning
**Difficulty**: Intermediate to Advanced
**Estimated Time**: 30-60 minutes

View File

@ -0,0 +1,339 @@
---
name: "AgentDB Memory Patterns"
description: "Implement persistent memory patterns for AI agents using AgentDB. Includes session memory, long-term storage, pattern learning, and context management. Use when building stateful agents, chat systems, or intelligent assistants."
---
# AgentDB Memory Patterns
## What This Skill Does
Provides memory management patterns for AI agents using AgentDB's persistent storage and ReasoningBank integration. Enables agents to remember conversations, learn from interactions, and maintain context across sessions.
**Performance**: 150x-12,500x faster than traditional solutions with 100% backward compatibility.
## Prerequisites
- Node.js 18+
- AgentDB v1.0.7+ (via agentic-flow or standalone)
- Understanding of agent architectures
## Quick Start with CLI
### Initialize AgentDB
```bash
# Initialize vector database
npx agentdb@latest init ./agents.db
# Or with custom dimensions
npx agentdb@latest init ./agents.db --dimension 768
# Use preset configurations
npx agentdb@latest init ./agents.db --preset large
# In-memory database for testing
npx agentdb@latest init ./memory.db --in-memory
```
### Start MCP Server for Codex
```bash
# Start MCP server (integrates with Codex)
npx agentdb@latest mcp
# Add to Codex (one-time setup)
codex mcp add agentdb npx agentdb@latest mcp
```
### Create Learning Plugin
```bash
# Interactive plugin wizard
npx agentdb@latest create-plugin
# Use template directly
npx agentdb@latest create-plugin -t decision-transformer -n my-agent
# Available templates:
# - decision-transformer (sequence modeling RL)
# - q-learning (value-based learning)
# - sarsa (on-policy TD learning)
# - actor-critic (policy gradient)
# - curiosity-driven (exploration-based)
```
## Quick Start with API
```typescript
import { createAgentDBAdapter } from 'agentic-flow/reasoningbank';
// Initialize with default configuration
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/reasoningbank.db',
enableLearning: true, // Enable learning plugins
enableReasoning: true, // Enable reasoning agents
quantizationType: 'scalar', // binary | scalar | product | none
cacheSize: 1000, // In-memory cache
});
// Store interaction memory
const patternId = await adapter.insertPattern({
id: '',
type: 'pattern',
domain: 'conversation',
pattern_data: JSON.stringify({
embedding: await computeEmbedding('What is the capital of France?'),
pattern: {
user: 'What is the capital of France?',
assistant: 'The capital of France is Paris.',
timestamp: Date.now()
}
}),
confidence: 0.95,
usage_count: 1,
success_count: 1,
created_at: Date.now(),
last_used: Date.now(),
});
// Retrieve context with reasoning
const context = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'conversation',
k: 10,
useMMR: true, // Maximal Marginal Relevance
synthesizeContext: true, // Generate rich context
});
```
## Memory Patterns
### 1. Session Memory
```typescript
class SessionMemory {
async storeMessage(role: string, content: string) {
return await db.storeMemory({
sessionId: this.sessionId,
role,
content,
timestamp: Date.now()
});
}
async getSessionHistory(limit = 20) {
return await db.query({
filters: { sessionId: this.sessionId },
orderBy: 'timestamp',
limit
});
}
}
```
### 2. Long-Term Memory
```typescript
// Store important facts
await db.storeFact({
category: 'user_preference',
key: 'language',
value: 'English',
confidence: 1.0,
source: 'explicit'
});
// Retrieve facts
const prefs = await db.getFacts({
category: 'user_preference'
});
```
### 3. Pattern Learning
```typescript
// Learn from successful interactions
await db.storePattern({
trigger: 'user_asks_time',
response: 'provide_formatted_time',
success: true,
context: { timezone: 'UTC' }
});
// Apply learned patterns
const pattern = await db.matchPattern(currentContext);
```
## Advanced Patterns
### Hierarchical Memory
```typescript
// Organize memory in hierarchy
await memory.organize({
immediate: recentMessages, // Last 10 messages
shortTerm: sessionContext, // Current session
longTerm: importantFacts, // Persistent facts
semantic: embeddedKnowledge // Vector search
});
```
### Memory Consolidation
```typescript
// Periodically consolidate memories
await memory.consolidate({
strategy: 'importance', // Keep important memories
maxSize: 10000, // Size limit
minScore: 0.5 // Relevance threshold
});
```
## CLI Operations
### Query Database
```bash
# Query with vector embedding
npx agentdb@latest query ./agents.db "[0.1,0.2,0.3,...]"
# Top-k results
npx agentdb@latest query ./agents.db "[0.1,0.2,0.3]" -k 10
# With similarity threshold
npx agentdb@latest query ./agents.db "0.1 0.2 0.3" -t 0.75
# JSON output
npx agentdb@latest query ./agents.db "[...]" -f json
```
### Import/Export Data
```bash
# Export vectors to file
npx agentdb@latest export ./agents.db ./backup.json
# Import vectors from file
npx agentdb@latest import ./backup.json
# Get database statistics
npx agentdb@latest stats ./agents.db
```
### Performance Benchmarks
```bash
# Run performance benchmarks
npx agentdb@latest benchmark
# Results show:
# - Pattern Search: 150x faster (100µs vs 15ms)
# - Batch Insert: 500x faster (2ms vs 1s)
# - Large-scale Query: 12,500x faster (8ms vs 100s)
```
## Integration with ReasoningBank
```typescript
import { createAgentDBAdapter, migrateToAgentDB } from 'agentic-flow/reasoningbank';
// Migrate from legacy ReasoningBank
const result = await migrateToAgentDB(
'.swarm/memory.db', // Source (legacy)
'.agentdb/reasoningbank.db' // Destination (AgentDB)
);
console.log(`✅ Migrated ${result.patternsMigrated} patterns`);
// Train learning model
const adapter = await createAgentDBAdapter({
enableLearning: true,
});
await adapter.train({
epochs: 50,
batchSize: 32,
});
// Get optimal strategy with reasoning
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'task-planning',
synthesizeContext: true,
optimizeMemory: true,
});
```
## Learning Plugins
### Available Algorithms (9 Total)
1. **Decision Transformer** - Sequence modeling RL (recommended)
2. **Q-Learning** - Value-based learning
3. **SARSA** - On-policy TD learning
4. **Actor-Critic** - Policy gradient with baseline
5. **Active Learning** - Query selection
6. **Adversarial Training** - Robustness
7. **Curriculum Learning** - Progressive difficulty
8. **Federated Learning** - Distributed learning
9. **Multi-task Learning** - Transfer learning
### List and Manage Plugins
```bash
# List available plugins
npx agentdb@latest list-plugins
# List plugin templates
npx agentdb@latest list-templates
# Get plugin info
npx agentdb@latest plugin-info <name>
```
## Reasoning Agents (4 Modules)
1. **PatternMatcher** - Find similar patterns with HNSW indexing
2. **ContextSynthesizer** - Generate rich context from multiple sources
3. **MemoryOptimizer** - Consolidate similar patterns, prune low-quality
4. **ExperienceCurator** - Quality-based experience filtering
## Best Practices
1. **Enable quantization**: Use scalar/binary for 4-32x memory reduction
2. **Use caching**: 1000 pattern cache for <1ms retrieval
3. **Batch operations**: 500x faster than individual inserts
4. **Train regularly**: Update learning models with new experiences
5. **Enable reasoning**: Automatic context synthesis and optimization
6. **Monitor metrics**: Use `stats` command to track performance
## Troubleshooting
### Issue: Memory growing too large
```bash
# Check database size
npx agentdb@latest stats ./agents.db
# Enable quantization
# Use 'binary' (32x smaller) or 'scalar' (4x smaller)
```
### Issue: Slow search performance
```bash
# Enable HNSW indexing and caching
# Results: <100µs search time
```
### Issue: Migration from legacy ReasoningBank
```bash
# Automatic migration with validation
npx agentdb@latest migrate --source .swarm/memory.db
```
## Performance Characteristics
- **Vector Search**: <100µs (HNSW indexing)
- **Pattern Retrieval**: <1ms (with cache)
- **Batch Insert**: 2ms for 100 patterns
- **Memory Efficiency**: 4-32x reduction with quantization
- **Backward Compatibility**: 100% compatible with ReasoningBank API
## Learn More
- GitHub: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb
- Documentation: node_modules/agentic-flow/docs/AGENTDB_INTEGRATION.md
- MCP Integration: `npx agentdb@latest mcp` for Codex
- Website: https://agentdb.ruv.io

View File

@ -0,0 +1,509 @@
---
name: "AgentDB Performance Optimization"
description: "Optimize AgentDB performance with quantization (4-32x memory reduction), HNSW indexing (150x faster search), caching, and batch operations. Use when optimizing memory usage, improving search speed, or scaling to millions of vectors."
---
# AgentDB Performance Optimization
## What This Skill Does
Provides comprehensive performance optimization techniques for AgentDB vector databases. Achieve 150x-12,500x performance improvements through quantization, HNSW indexing, caching strategies, and batch operations. Reduce memory usage by 4-32x while maintaining accuracy.
**Performance**: <100µs vector search, <1ms pattern retrieval, 2ms batch insert for 100 vectors.
## Prerequisites
- Node.js 18+
- AgentDB v1.0.7+ (via agentic-flow)
- Existing AgentDB database or application
---
## Quick Start
### Run Performance Benchmarks
```bash
# Comprehensive performance benchmarking
npx agentdb@latest benchmark
# Results show:
# ✅ Pattern Search: 150x faster (100µs vs 15ms)
# ✅ Batch Insert: 500x faster (2ms vs 1s for 100 vectors)
# ✅ Large-scale Query: 12,500x faster (8ms vs 100s at 1M vectors)
# ✅ Memory Efficiency: 4-32x reduction with quantization
```
### Enable Optimizations
```typescript
import { createAgentDBAdapter } from 'agentic-flow/reasoningbank';
// Optimized configuration
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/optimized.db',
quantizationType: 'binary', // 32x memory reduction
cacheSize: 1000, // In-memory cache
enableLearning: true,
enableReasoning: true,
});
```
---
## Quantization Strategies
### 1. Binary Quantization (32x Reduction)
**Best For**: Large-scale deployments (1M+ vectors), memory-constrained environments
**Trade-off**: ~2-5% accuracy loss, 32x memory reduction, 10x faster
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'binary',
// 768-dim float32 (3072 bytes) → 96 bytes binary
// 1M vectors: 3GB → 96MB
});
```
**Use Cases**:
- Mobile/edge deployment
- Large-scale vector storage (millions of vectors)
- Real-time search with memory constraints
**Performance**:
- Memory: 32x smaller
- Search Speed: 10x faster (bit operations)
- Accuracy: 95-98% of original
### 2. Scalar Quantization (4x Reduction)
**Best For**: Balanced performance/accuracy, moderate datasets
**Trade-off**: ~1-2% accuracy loss, 4x memory reduction, 3x faster
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'scalar',
// 768-dim float32 (3072 bytes) → 768 bytes (uint8)
// 1M vectors: 3GB → 768MB
});
```
**Use Cases**:
- Production applications requiring high accuracy
- Medium-scale deployments (10K-1M vectors)
- General-purpose optimization
**Performance**:
- Memory: 4x smaller
- Search Speed: 3x faster
- Accuracy: 98-99% of original
### 3. Product Quantization (8-16x Reduction)
**Best For**: High-dimensional vectors, balanced compression
**Trade-off**: ~3-7% accuracy loss, 8-16x memory reduction, 5x faster
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'product',
// 768-dim float32 (3072 bytes) → 48-96 bytes
// 1M vectors: 3GB → 192MB
});
```
**Use Cases**:
- High-dimensional embeddings (>512 dims)
- Image/video embeddings
- Large-scale similarity search
**Performance**:
- Memory: 8-16x smaller
- Search Speed: 5x faster
- Accuracy: 93-97% of original
### 4. No Quantization (Full Precision)
**Best For**: Maximum accuracy, small datasets
**Trade-off**: No accuracy loss, full memory usage
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'none',
// Full float32 precision
});
```
---
## HNSW Indexing
**Hierarchical Navigable Small World** - O(log n) search complexity
### Automatic HNSW
AgentDB automatically builds HNSW indices:
```typescript
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/vectors.db',
// HNSW automatically enabled
});
// Search with HNSW (100µs vs 15ms linear scan)
const results = await adapter.retrieveWithReasoning(queryEmbedding, {
k: 10,
});
```
### HNSW Parameters
```typescript
// Advanced HNSW configuration
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/vectors.db',
hnswM: 16, // Connections per layer (default: 16)
hnswEfConstruction: 200, // Build quality (default: 200)
hnswEfSearch: 100, // Search quality (default: 100)
});
```
**Parameter Tuning**:
- **M** (connections): Higher = better recall, more memory
- Small datasets (<10K): M = 8
- Medium datasets (10K-100K): M = 16
- Large datasets (>100K): M = 32
- **efConstruction**: Higher = better index quality, slower build
- Fast build: 100
- Balanced: 200 (default)
- High quality: 400
- **efSearch**: Higher = better recall, slower search
- Fast search: 50
- Balanced: 100 (default)
- High recall: 200
---
## Caching Strategies
### In-Memory Pattern Cache
```typescript
const adapter = await createAgentDBAdapter({
cacheSize: 1000, // Cache 1000 most-used patterns
});
// First retrieval: ~2ms (database)
// Subsequent: <1ms (cache hit)
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
k: 10,
});
```
**Cache Tuning**:
- Small applications: 100-500 patterns
- Medium applications: 500-2000 patterns
- Large applications: 2000-5000 patterns
### LRU Cache Behavior
```typescript
// Cache automatically evicts least-recently-used patterns
// Most frequently accessed patterns stay in cache
// Monitor cache performance
const stats = await adapter.getStats();
console.log('Cache Hit Rate:', stats.cacheHitRate);
// Aim for >80% hit rate
```
---
## Batch Operations
### Batch Insert (500x Faster)
```typescript
// ❌ SLOW: Individual inserts
for (const doc of documents) {
await adapter.insertPattern({ /* ... */ }); // 1s for 100 docs
}
// ✅ FAST: Batch insert
const patterns = documents.map(doc => ({
id: '',
type: 'document',
domain: 'knowledge',
pattern_data: JSON.stringify({
embedding: doc.embedding,
text: doc.text,
}),
confidence: 1.0,
usage_count: 0,
success_count: 0,
created_at: Date.now(),
last_used: Date.now(),
}));
// Insert the pre-built patterns (2ms for 100 docs)
// Building every pattern object up front, then inserting in a tight loop,
// avoids the per-document build-and-serialize overhead of the loop above.
for (const pattern of patterns) {
await adapter.insertPattern(pattern);
}
```
### Batch Retrieval
```typescript
// Retrieve multiple queries efficiently
const queries = [queryEmbedding1, queryEmbedding2, queryEmbedding3];
// Parallel retrieval
const results = await Promise.all(
queries.map(q => adapter.retrieveWithReasoning(q, { k: 5 }))
);
```
---
## Memory Optimization
### Automatic Consolidation
```typescript
// Enable automatic pattern consolidation
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'documents',
optimizeMemory: true, // Consolidate similar patterns
k: 10,
});
console.log('Optimizations:', result.optimizations);
// {
// consolidated: 15, // Merged 15 similar patterns
// pruned: 3, // Removed 3 low-quality patterns
// improved_quality: 0.12 // 12% quality improvement
// }
```
### Manual Optimization
```typescript
// Capture statistics before and after optimization to compare
const before = await adapter.getStats();
await adapter.optimize();
const after = await adapter.getStats();
console.log('Before:', before.totalPatterns);
console.log('After:', after.totalPatterns); // Reduced by ~10-30%
```
### Pruning Strategies
```typescript
// Prune low-confidence patterns
await adapter.prune({
minConfidence: 0.5, // Remove confidence < 0.5
minUsageCount: 2, // Remove usage_count < 2
maxAge: 30 * 24 * 3600, // Remove >30 days old
});
```
---
## Performance Monitoring
### Database Statistics
```bash
# Get comprehensive stats
npx agentdb@latest stats .agentdb/vectors.db
# Output:
# Total Patterns: 125,430
# Database Size: 47.2 MB (with binary quantization)
# Avg Confidence: 0.87
# Domains: 15
# Cache Hit Rate: 84%
# Index Type: HNSW
```
### Runtime Metrics
```typescript
const stats = await adapter.getStats();
console.log('Performance Metrics:');
console.log('Total Patterns:', stats.totalPatterns);
console.log('Database Size:', stats.dbSize);
console.log('Avg Confidence:', stats.avgConfidence);
console.log('Cache Hit Rate:', stats.cacheHitRate);
console.log('Search Latency (avg):', stats.avgSearchLatency);
console.log('Insert Latency (avg):', stats.avgInsertLatency);
```
---
## Optimization Recipes
### Recipe 1: Maximum Speed (Sacrifice Accuracy)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'binary', // 32x memory reduction
cacheSize: 5000, // Large cache
hnswM: 8, // Fewer connections = faster
hnswEfSearch: 50, // Low search quality = faster
});
// Expected: <50µs search, 90-95% accuracy
```
### Recipe 2: Balanced Performance
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'scalar', // 4x memory reduction
cacheSize: 1000, // Standard cache
hnswM: 16, // Balanced connections
hnswEfSearch: 100, // Balanced quality
});
// Expected: <100µs search, 98-99% accuracy
```
### Recipe 3: Maximum Accuracy
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'none', // No quantization
cacheSize: 2000, // Large cache
hnswM: 32, // Many connections
hnswEfSearch: 200, // High search quality
});
// Expected: <200µs search, 100% accuracy
```
### Recipe 4: Memory-Constrained (Mobile/Edge)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'binary', // 32x memory reduction
cacheSize: 100, // Small cache
hnswM: 8, // Minimal connections
});
// Expected: <100µs search, ~10MB for 100K vectors
```
---
## Scaling Strategies
### Small Scale (<10K vectors)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'none', // Full precision
cacheSize: 500,
hnswM: 8,
});
```
### Medium Scale (10K-100K vectors)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'scalar', // 4x reduction
cacheSize: 1000,
hnswM: 16,
});
```
### Large Scale (100K-1M vectors)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'binary', // 32x reduction
cacheSize: 2000,
hnswM: 32,
});
```
### Massive Scale (>1M vectors)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'product', // 8-16x reduction
cacheSize: 5000,
hnswM: 48,
hnswEfConstruction: 400,
});
```
---
## Troubleshooting
### Issue: High memory usage
```bash
# Check database size
npx agentdb@latest stats .agentdb/vectors.db
# Enable quantization
# Use 'binary' for 32x reduction
```
### Issue: Slow search performance
```typescript
// Increase cache size
const adapter = await createAgentDBAdapter({
cacheSize: 2000, // Increase from 1000
});
// Reduce search quality (faster)
const result = await adapter.retrieveWithReasoning(queryEmbedding, {
k: 5, // Reduce from 10
});
```
### Issue: Low accuracy
```typescript
// Disable or use lighter quantization
const adapter = await createAgentDBAdapter({
quantizationType: 'scalar', // Instead of 'binary'
hnswEfSearch: 200, // Higher search quality
});
```
---
## Performance Benchmarks
**Test System**: AMD Ryzen 9 5950X, 64GB RAM
| Operation | Vector Count | No Optimization | Optimized | Improvement |
|-----------|-------------|-----------------|-----------|-------------|
| Search | 10K | 15ms | 100µs | 150x |
| Search | 100K | 150ms | 120µs | 1,250x |
| Search | 1M | 100s | 8ms | 12,500x |
| Batch Insert (100) | - | 1s | 2ms | 500x |
| Memory Usage | 1M | 3GB | 96MB | 32x (binary) |
---
## Learn More
- **Quantization Paper**: docs/quantization-techniques.pdf
- **HNSW Algorithm**: docs/hnsw-index.pdf
- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb
- **Website**: https://agentdb.ruv.io
---
**Category**: Performance / Optimization
**Difficulty**: Intermediate
**Estimated Time**: 20-30 minutes

View File

@ -0,0 +1,339 @@
---
name: "AgentDB Vector Search"
description: "Implement semantic vector search with AgentDB for intelligent document retrieval, similarity matching, and context-aware querying. Use when building RAG systems, semantic search engines, or intelligent knowledge bases."
---
# AgentDB Vector Search
## What This Skill Does
Implements vector-based semantic search using AgentDB's high-performance vector database with **150x-12,500x faster** operations than traditional solutions. Features HNSW indexing, quantization, and sub-millisecond search (<100µs).
## Prerequisites
- Node.js 18+
- AgentDB v1.0.7+ (via agentic-flow or standalone)
- OpenAI API key (for embeddings) or custom embedding model
## Quick Start with CLI
### Initialize Vector Database
```bash
# Initialize with default dimensions (1536 for OpenAI ada-002)
npx agentdb@latest init ./vectors.db
# Custom dimensions for different embedding models
npx agentdb@latest init ./vectors.db --dimension 768 # sentence-transformers
npx agentdb@latest init ./vectors.db --dimension 384 # all-MiniLM-L6-v2
# Use preset configurations
npx agentdb@latest init ./vectors.db --preset small # <10K vectors
npx agentdb@latest init ./vectors.db --preset medium # 10K-100K vectors
npx agentdb@latest init ./vectors.db --preset large # >100K vectors
# In-memory database for testing
npx agentdb@latest init ./vectors.db --in-memory
```
### Query Vector Database
```bash
# Basic similarity search
npx agentdb@latest query ./vectors.db "[0.1,0.2,0.3,...]"
# Top-k results
npx agentdb@latest query ./vectors.db "[0.1,0.2,0.3]" -k 10
# With similarity threshold (cosine similarity)
npx agentdb@latest query ./vectors.db "0.1 0.2 0.3" -t 0.75 -m cosine
# Different distance metrics
npx agentdb@latest query ./vectors.db "[...]" -m euclidean # L2 distance
npx agentdb@latest query ./vectors.db "[...]" -m dot # Dot product
# JSON output for automation
npx agentdb@latest query ./vectors.db "[...]" -f json -k 5
# Verbose output with distances
npx agentdb@latest query ./vectors.db "[...]" -v
```
### Import/Export Vectors
```bash
# Export vectors to JSON
npx agentdb@latest export ./vectors.db ./backup.json
# Import vectors from JSON
npx agentdb@latest import ./backup.json
# Get database statistics
npx agentdb@latest stats ./vectors.db
```
## Quick Start with API
```typescript
import { createAgentDBAdapter, computeEmbedding } from 'agentic-flow/reasoningbank';
// Initialize with vector search optimizations
const adapter = await createAgentDBAdapter({
dbPath: '.agentdb/vectors.db',
enableLearning: false, // Vector search only
enableReasoning: true, // Enable semantic matching
quantizationType: 'binary', // 32x memory reduction
cacheSize: 1000, // Fast retrieval
});
// Store document with embedding
const text = "The quantum computer achieved 100 qubits";
const embedding = await computeEmbedding(text);
await adapter.insertPattern({
id: '',
type: 'document',
domain: 'technology',
pattern_data: JSON.stringify({
embedding,
text,
metadata: { category: "quantum", date: "2025-01-15" }
}),
confidence: 1.0,
usage_count: 0,
success_count: 0,
created_at: Date.now(),
last_used: Date.now(),
});
// Semantic search with MMR (Maximal Marginal Relevance)
const queryEmbedding = await computeEmbedding("quantum computing advances");
const results = await adapter.retrieveWithReasoning(queryEmbedding, {
domain: 'technology',
k: 10,
useMMR: true, // Diverse results
synthesizeContext: true, // Rich context
});
```
## Core Features
### 1. Vector Storage
```typescript
// Store with automatic embedding
await db.storeWithEmbedding({
content: "Your document text",
metadata: { source: "docs", page: 42 }
});
```
### 2. Similarity Search
```typescript
// Find similar documents
const similar = await db.findSimilar("quantum computing", {
limit: 5,
minScore: 0.75
});
```
### 3. Hybrid Search (Vector + Metadata)
```typescript
// Combine vector similarity with metadata filtering
const results = await db.hybridSearch({
query: "machine learning models",
filters: {
category: "research",
date: { $gte: "2024-01-01" }
},
limit: 20
});
```
## Advanced Usage
### RAG (Retrieval Augmented Generation)
```typescript
// Build RAG pipeline
async function ragQuery(question: string) {
// 1. Get relevant context
const context = await db.searchSimilar(
await embed(question),
{ limit: 5, threshold: 0.7 }
);
// 2. Generate answer with context
const prompt = `Context: ${context.map(c => c.text).join('\n')}
Question: ${question}`;
return await llm.generate(prompt);
}
```
### Batch Operations
```typescript
// Efficient batch storage
await db.batchStore(documents.map(doc => ({
text: doc.content,
embedding: doc.vector,
metadata: doc.meta
})));
```
## MCP Server Integration
```bash
# Start AgentDB MCP server for Codex
npx agentdb@latest mcp
# Add to Codex (one-time setup)
codex mcp add agentdb npx agentdb@latest mcp
# Now use MCP tools in Codex:
# - agentdb_query: Semantic vector search
# - agentdb_store: Store documents with embeddings
# - agentdb_stats: Database statistics
```
## Performance Benchmarks
```bash
# Run comprehensive benchmarks
npx agentdb@latest benchmark
# Results:
# ✅ Pattern Search: 150x faster (100µs vs 15ms)
# ✅ Batch Insert: 500x faster (2ms vs 1s for 100 vectors)
# ✅ Large-scale Query: 12,500x faster (8ms vs 100s at 1M vectors)
# ✅ Memory Efficiency: 4-32x reduction with quantization
```
## Quantization Options
AgentDB provides multiple quantization strategies for memory efficiency:
### Binary Quantization (32x reduction)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'binary', // 768-dim → 96 bytes
});
```
### Scalar Quantization (4x reduction)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'scalar', // 768-dim → 768 bytes
});
```
### Product Quantization (8-16x reduction)
```typescript
const adapter = await createAgentDBAdapter({
quantizationType: 'product', // 768-dim → 48-96 bytes
});
```
## Distance Metrics
```bash
# Cosine similarity (default, best for most use cases)
npx agentdb@latest query ./db.sqlite "[...]" -m cosine
# Euclidean distance (L2 norm)
npx agentdb@latest query ./db.sqlite "[...]" -m euclidean
# Dot product (for normalized vectors)
npx agentdb@latest query ./db.sqlite "[...]" -m dot
```
## Advanced Features
### HNSW Indexing
- **O(log n) search complexity**
- **Sub-millisecond retrieval** (<100µs)
- **Automatic index building**
### Caching
- **1000 pattern in-memory cache**
- **<1ms pattern retrieval**
- **Automatic cache invalidation**
### MMR (Maximal Marginal Relevance)
- **Diverse result sets**
- **Avoid redundancy**
- **Balance relevance and diversity**
## Performance Tips
1. **Enable HNSW indexing**: Automatic with AgentDB, 10-100x faster
2. **Use quantization**: Binary (32x), Scalar (4x), Product (8-16x) memory reduction
3. **Batch operations**: 500x faster for bulk inserts
4. **Match dimensions**: 1536 (OpenAI), 768 (sentence-transformers), 384 (MiniLM)
5. **Similarity threshold**: Start at 0.7 for quality, adjust based on use case
6. **Enable caching**: 1000 pattern cache for frequent queries
## Troubleshooting
### Issue: Slow search performance
```bash
# Check if HNSW indexing is enabled (automatic)
npx agentdb@latest stats ./vectors.db
# Expected: <100µs search time
```
### Issue: High memory usage
```bash
# Enable binary quantization (32x reduction)
# Use in adapter: quantizationType: 'binary'
```
### Issue: Poor relevance
```bash
# Adjust similarity threshold
npx agentdb@latest query ./db.sqlite "[...]" -t 0.8 # Higher threshold
# Or use MMR for diverse results
# Use in adapter: useMMR: true
```
### Issue: Wrong dimensions
```bash
# Check embedding model dimensions:
# - OpenAI ada-002: 1536
# - sentence-transformers: 768
# - all-MiniLM-L6-v2: 384
npx agentdb@latest init ./db.sqlite --dimension 768
```
## Database Statistics
```bash
# Get comprehensive stats
npx agentdb@latest stats ./vectors.db
# Shows:
# - Total patterns/vectors
# - Database size
# - Average confidence
# - Domains distribution
# - Index status
```
## Performance Characteristics
- **Vector Search**: <100µs (HNSW indexing)
- **Pattern Retrieval**: <1ms (with cache)
- **Batch Insert**: 2ms for 100 vectors
- **Memory Efficiency**: 4-32x reduction with quantization
- **Scalability**: Handles 1M+ vectors efficiently
- **Latency**: Sub-millisecond for most operations
## Learn More
- GitHub: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb
- Documentation: node_modules/agentic-flow/docs/AGENTDB_INTEGRATION.md
- MCP Integration: `npx agentdb@latest mcp` for Codex
- Website: https://agentdb.ruv.io
- CLI Help: `npx agentdb@latest --help`
- Command Help: `npx agentdb@latest help <command>`

View File

@ -0,0 +1,204 @@
---
name: browser
description: Web browser automation with AI-optimized snapshots for Codex-flow agents
version: 1.0.0
triggers:
- /browser
- browse
- web automation
- scrape
- navigate
- screenshot
tools:
- browser/open
- browser/snapshot
- browser/click
- browser/fill
- browser/screenshot
- browser/close
---
# Browser Automation Skill
Web browser automation using agent-browser with AI-optimized snapshots. Reduces context by 93% using element refs (@e1, @e2) instead of full DOM.
## Core Workflow
```bash
# 1. Navigate to page
agent-browser open <url>
# 2. Get accessibility tree with element refs
agent-browser snapshot -i # -i = interactive elements only
# 3. Interact using refs from snapshot
agent-browser click @e2
agent-browser fill @e3 "text"
# 4. Re-snapshot after page changes
agent-browser snapshot -i
```
## Quick Reference
### Navigation
| Command | Description |
|---------|-------------|
| `open <url>` | Navigate to URL |
| `back` | Go back |
| `forward` | Go forward |
| `reload` | Reload page |
| `close` | Close browser |
### Snapshots (AI-Optimized)
| Command | Description |
|---------|-------------|
| `snapshot` | Full accessibility tree |
| `snapshot -i` | Interactive elements only (buttons, links, inputs) |
| `snapshot -c` | Compact (remove empty elements) |
| `snapshot -d 3` | Limit depth to 3 levels |
| `screenshot [path]` | Capture screenshot (base64 if no path) |
### Interaction
| Command | Description |
|---------|-------------|
| `click <sel>` | Click element |
| `fill <sel> <text>` | Clear and fill input |
| `type <sel> <text>` | Type with key events |
| `press <key>` | Press key (Enter, Tab, etc.) |
| `hover <sel>` | Hover element |
| `select <sel> <val>` | Select dropdown option |
| `check/uncheck <sel>` | Toggle checkbox |
| `scroll <dir> [px]` | Scroll page |
### Get Info
| Command | Description |
|---------|-------------|
| `get text <sel>` | Get text content |
| `get html <sel>` | Get innerHTML |
| `get value <sel>` | Get input value |
| `get attr <sel> <attr>` | Get attribute |
| `get title` | Get page title |
| `get url` | Get current URL |
### Wait
| Command | Description |
|---------|-------------|
| `wait <selector>` | Wait for element |
| `wait <ms>` | Wait milliseconds |
| `wait --text "text"` | Wait for text |
| `wait --url "pattern"` | Wait for URL |
| `wait --load networkidle` | Wait for load state |
### Sessions
| Command | Description |
|---------|-------------|
| `--session <name>` | Use isolated session |
| `session list` | List active sessions |
## Selectors
### Element Refs (Recommended)
```bash
# Get refs from snapshot
agent-browser snapshot -i
# Output: button "Submit" [ref=e2]
# Use ref to interact
agent-browser click @e2
```
### CSS Selectors
```bash
agent-browser click "#submit"
agent-browser fill ".email-input" "test@test.com"
```
### Semantic Locators
```bash
agent-browser find role button click --name "Submit"
agent-browser find label "Email" fill "test@test.com"
agent-browser find testid "login-btn" click
```
## Examples
### Login Flow
```bash
agent-browser open https://example.com/login
agent-browser snapshot -i
agent-browser fill @e2 "user@example.com"
agent-browser fill @e3 "password123"
agent-browser click @e4
agent-browser wait --url "**/dashboard"
```
### Form Submission
```bash
agent-browser open https://example.com/contact
agent-browser snapshot -i
agent-browser fill @e1 "John Doe"
agent-browser fill @e2 "john@example.com"
agent-browser fill @e3 "Hello, this is my message"
agent-browser click @e4
agent-browser wait --text "Thank you"
```
### Data Extraction
```bash
agent-browser open https://example.com/products
agent-browser snapshot -i
# Iterate through product refs
agent-browser get text @e1 # Product name
agent-browser get text @e2 # Price
agent-browser get attr @e3 href # Link
```
### Multi-Session (Swarm)
```bash
# Session 1: Navigator
agent-browser --session nav open https://example.com
agent-browser --session nav state save auth.json
# Session 2: Scraper (uses same auth)
agent-browser --session scrape state load auth.json
agent-browser --session scrape open https://example.com/data
agent-browser --session scrape snapshot -i
```
## Integration with Codex Flow
### MCP Tools
All browser operations are available as MCP tools with `browser/` prefix:
- `browser/open`
- `browser/snapshot`
- `browser/click`
- `browser/fill`
- `browser/screenshot`
- etc.
### Memory Integration
```bash
# Store successful patterns
npx @Codex-flow/cli memory store --namespace browser-patterns --key "login-flow" --value "snapshot->fill->click->wait"
# Retrieve before similar task
npx @Codex-flow/cli memory search --query "login automation"
```
### Hooks
```bash
# Pre-browse hook (get context)
npx @Codex-flow/cli hooks pre-edit --file "browser-task.ts"
# Post-browse hook (record success)
npx @Codex-flow/cli hooks post-task --task-id "browse-1" --success true
```
## Tips
1. **Always use snapshots** - They're optimized for AI with refs
2. **Prefer `-i` flag** - Gets only interactive elements, smaller output
3. **Use refs, not selectors** - More reliable, deterministic
4. **Re-snapshot after navigation** - Page state changes
5. **Use sessions for parallel work** - Each session is isolated

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,874 @@
---
name: github-multi-repo
version: 1.0.0
description: Multi-repository coordination, synchronization, and architecture management with AI swarm orchestration
category: github-integration
tags: [multi-repo, synchronization, architecture, coordination, github]
author: Codex Flow Team
requires:
- ruv-swarm@^1.0.11
- gh-cli@^2.0.0
capabilities:
- cross-repository coordination
- package synchronization
- architecture optimization
- template management
- distributed workflows
---
# GitHub Multi-Repository Coordination Skill
## Overview
Advanced multi-repository coordination system that combines swarm intelligence, package synchronization, and repository architecture optimization. This skill enables organization-wide automation, cross-project collaboration, and scalable repository management.
## Core Capabilities
### 🔄 Multi-Repository Swarm Coordination
Cross-repository AI swarm orchestration for distributed development workflows.
### 📦 Package Synchronization
Intelligent dependency resolution and version alignment across multiple packages.
### 🏗️ Repository Architecture
Structure optimization and template management for scalable projects.
### 🔗 Integration Management
Cross-package integration testing and deployment coordination.
## Quick Start
### Initialize Multi-Repo Coordination
```bash
# Basic swarm initialization
npx Codex-flow skill run github-multi-repo init \
--repos "org/frontend,org/backend,org/shared" \
--topology hierarchical
# Advanced initialization with synchronization
npx Codex-flow skill run github-multi-repo init \
--repos "org/frontend,org/backend,org/shared" \
--topology mesh \
--shared-memory \
--sync-strategy eventual
```
### Synchronize Packages
```bash
# Synchronize package versions and dependencies
npx Codex-flow skill run github-multi-repo sync \
--packages "Codex-flow,ruv-swarm" \
--align-versions \
--update-docs
```
### Optimize Architecture
```bash
# Analyze and optimize repository structure
npx Codex-flow skill run github-multi-repo optimize \
--analyze-structure \
--suggest-improvements \
--create-templates
```
## Features
### 1. Cross-Repository Swarm Orchestration
#### Repository Discovery
```javascript
// Auto-discover related repositories with gh CLI
const REPOS = Bash(`gh repo list my-organization --limit 100 \
--json name,description,languages,topics \
--jq '.[] | select(.languages | keys | contains(["TypeScript"]))'`)
// Analyze repository dependencies
const DEPS = Bash(`gh repo list my-organization --json name | \
jq -r '.[].name' | while read -r repo; do
gh api repos/my-organization/$repo/contents/package.json \
--jq '.content' 2>/dev/null | base64 -d | jq '{name, dependencies}'
done | jq -s '.'`)
// Initialize swarm with discovered repositories
mcp__claude-flow__swarm_init({
topology: "hierarchical",
maxAgents: 8,
metadata: { repos: REPOS, dependencies: DEPS }
})
```
#### Synchronized Operations
```javascript
// Execute synchronized changes across repositories
[Parallel Multi-Repo Operations]:
// Spawn coordination agents
Task("Repository Coordinator", "Coordinate changes across all repositories", "coordinator")
Task("Dependency Analyzer", "Analyze cross-repo dependencies", "analyst")
Task("Integration Tester", "Validate cross-repo changes", "tester")
// Get matching repositories
Bash(`gh repo list org --limit 100 --json name \
--jq '.[] | select(.name | test("-service$")) | .name' > /tmp/repos.txt`)
// Execute task across repositories
Bash(`cat /tmp/repos.txt | while read -r repo; do
gh repo clone org/$repo /tmp/$repo -- --depth=1
cd /tmp/$repo
# Apply changes
npm update
npm test
# Create PR if successful
if [ $? -eq 0 ]; then
git checkout -b update-dependencies-$(date +%Y%m%d)
git add -A
git commit -m "chore: Update dependencies"
git push origin HEAD
gh pr create --title "Update dependencies" --body "Automated update" --label "dependencies"
fi
done`)
// Track all operations
TodoWrite { todos: [
{ id: "discover", content: "Discover all service repositories", status: "completed" },
{ id: "update", content: "Update dependencies", status: "completed" },
{ id: "test", content: "Run integration tests", status: "in_progress" },
{ id: "pr", content: "Create pull requests", status: "pending" }
]}
```
### 2. Package Synchronization
#### Version Alignment
```javascript
// Synchronize package dependencies and versions
[Complete Package Sync]:
// Initialize sync swarm
mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 5 })
// Spawn sync agents
Task("Sync Coordinator", "Coordinate version alignment", "coordinator")
Task("Dependency Analyzer", "Analyze dependencies", "analyst")
Task("Integration Tester", "Validate synchronization", "tester")
// Read package states
Read("/workspaces/ruv-FANN/Codex-flow/Codex-flow/package.json")
Read("/workspaces/ruv-FANN/ruv-swarm/npm/package.json")
// Align versions using gh CLI
Bash(`gh api repos/:owner/:repo/git/refs \
-f ref='refs/heads/sync/package-alignment' \
-f sha=$(gh api repos/:owner/:repo/git/refs/heads/main --jq '.object.sha')`)
// Update package.json files
Bash(`gh api repos/:owner/:repo/contents/package.json \
--method PUT \
-f message="feat: Align Node.js version requirements" \
-f branch="sync/package-alignment" \
-f content="$(cat aligned-package.json | base64)"`)
// Store sync state
mcp__claude-flow__memory_usage({
action: "store",
key: "sync/packages/status",
value: {
timestamp: Date.now(),
packages_synced: ["Codex-flow", "ruv-swarm"],
status: "synchronized"
}
})
```
#### Documentation Synchronization
```javascript
// Synchronize AGENTS.md files across packages
[Documentation Sync]:
// Get source documentation
Bash(`gh api repos/:owner/:repo/contents/ruv-swarm/docs/AGENTS.md \
--jq '.content' | base64 -d > /tmp/Codex-source.md`)
// Update target documentation
Bash(`gh api repos/:owner/:repo/contents/Codex-flow/AGENTS.md \
--method PUT \
-f message="docs: Synchronize AGENTS.md" \
-f branch="sync/documentation" \
-f content="$(cat /tmp/Codex-source.md | base64)"`)
// Track sync status
mcp__claude-flow__memory_usage({
action: "store",
key: "sync/documentation/status",
value: { status: "synchronized", files: ["AGENTS.md"] }
})
```
#### Cross-Package Integration
```javascript
// Coordinate feature implementation across packages
[Cross-Package Feature]:
// Push changes to all packages
mcp__github__push_files({
branch: "feature/github-integration",
files: [
{
path: "Codex-flow/.Codex/commands/github/github-modes.md",
content: "[GitHub modes documentation]"
},
{
path: "ruv-swarm/src/github-coordinator/hooks.js",
content: "[GitHub coordination hooks]"
}
],
message: "feat: Add GitHub workflow integration"
})
// Create coordinated PR
Bash(`gh pr create \
--title "Feature: GitHub Workflow Integration" \
--body "## 🚀 GitHub Integration
### Features
- ✅ Multi-repo coordination
- ✅ Package synchronization
- ✅ Architecture optimization
### Testing
- [x] Package dependency verification
- [x] Integration tests
- [x] Cross-package compatibility"`)
```
### 3. Repository Architecture
#### Structure Analysis
```javascript
// Analyze and optimize repository structure
[Architecture Analysis]:
// Initialize architecture swarm
mcp__claude-flow__swarm_init({ topology: "hierarchical", maxAgents: 6 })
// Spawn architecture agents
Task("Senior Architect", "Analyze repository structure", "architect")
Task("Structure Analyst", "Identify optimization opportunities", "analyst")
Task("Performance Optimizer", "Optimize structure for scalability", "optimizer")
Task("Best Practices Researcher", "Research architecture patterns", "researcher")
// Analyze current structures
LS("/workspaces/ruv-FANN/Codex-flow/Codex-flow")
LS("/workspaces/ruv-FANN/ruv-swarm/npm")
// Search for best practices
Bash(`gh search repos "language:javascript template architecture" \
--limit 10 \
--json fullName,description,stargazersCount \
--sort stars \
--order desc`)
// Store analysis results
mcp__claude-flow__memory_usage({
action: "store",
key: "architecture/analysis/results",
value: {
repositories_analyzed: ["Codex-flow", "ruv-swarm"],
optimization_areas: ["structure", "workflows", "templates"],
recommendations: ["standardize_structure", "improve_workflows"]
}
})
```
#### Template Creation
```javascript
// Create standardized repository template
[Template Creation]:
// Create template repository
mcp__github__create_repository({
name: "Codex-project-template",
description: "Standardized template for Codex projects",
private: false,
autoInit: true
})
// Push template structure
mcp__github__push_files({
repo: "Codex-project-template",
files: [
{
path: ".Codex/commands/github/github-modes.md",
content: "[GitHub modes template]"
},
{
path: ".Codex/config.json",
content: JSON.stringify({
version: "1.0",
mcp_servers: {
"ruv-swarm": {
command: "npx",
args: ["ruv-swarm", "mcp", "start"]
}
}
})
},
{
path: "AGENTS.md",
content: "[Standardized AGENTS.md]"
},
{
path: "package.json",
content: JSON.stringify({
name: "Codex-project-template",
engines: { node: ">=20.0.0" },
dependencies: { "ruv-swarm": "^1.0.11" }
})
}
],
message: "feat: Create standardized template"
})
```
#### Cross-Repository Standardization
```javascript
// Synchronize structure across repositories
[Structure Standardization]:
const repositories = ["Codex-flow", "ruv-swarm", "Codex-extensions"]
// Update common files across all repositories
repositories.forEach(repo => {
mcp__github__create_or_update_file({
repo: "ruv-FANN",
path: `${repo}/.github/workflows/integration.yml`,
content: `name: Integration Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with: { node-version: '20' }
- run: npm install && npm test`,
message: "ci: Standardize integration workflow",
branch: "structure/standardization"
})
})
```
### 4. Orchestration Workflows
#### Dependency Management
```javascript
// Update dependencies across all repositories
[Organization-Wide Dependency Update]:
// Create tracking issue
TRACKING_ISSUE=$(Bash(`gh issue create \
  --title "Dependency Update: typescript@5.0.0" \
  --body "Tracking TypeScript update across all repositories" \
  --label "dependencies,tracking" | grep -oE '[0-9]+$'`))
// Find all TypeScript repositories
TS_REPOS=$(Bash(`gh repo list org --limit 100 --json name | \
jq -r '.[].name' | while read -r repo; do
if gh api repos/org/$repo/contents/package.json 2>/dev/null | \
jq -r '.content' | base64 -d | grep -q '"typescript"'; then
echo "$repo"
fi
done`))
// Update each repository
Bash(`echo "$TS_REPOS" | while read -r repo; do
gh repo clone org/$repo /tmp/$repo -- --depth=1
cd /tmp/$repo
npm install --save-dev typescript@5.0.0
if npm test; then
git checkout -b update-typescript-5
git add package.json package-lock.json
git commit -m "chore: Update TypeScript to 5.0.0
Part of #$TRACKING_ISSUE"
git push origin HEAD
    gh pr create \
      --title "Update TypeScript to 5.0.0" \
      --body "Updates TypeScript"$'\n\n'"Tracking: #$TRACKING_ISSUE" \
      --label "dependencies"
else
gh issue comment $TRACKING_ISSUE \
--body "❌ Failed to update $repo - tests failing"
fi
done`)
```
#### Refactoring Operations
```javascript
// Coordinate large-scale refactoring
[Cross-Repo Refactoring]:
// Initialize refactoring swarm
mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 8 })
// Spawn specialized agents
Task("Refactoring Coordinator", "Coordinate refactoring across repos", "coordinator")
Task("Impact Analyzer", "Analyze refactoring impact", "analyst")
Task("Code Transformer", "Apply refactoring changes", "coder")
Task("Migration Guide Creator", "Create migration documentation", "documenter")
Task("Integration Tester", "Validate refactored code", "tester")
// Execute refactoring
mcp__claude-flow__task_orchestrate({
task: "Rename OldAPI to NewAPI across all repositories",
strategy: "sequential",
priority: "high"
})
```
#### Security Updates
```javascript
// Coordinate security patches
[Security Patch Deployment]:
// Scan all repositories
Bash(`gh repo list org --limit 100 --json name | jq -r '.[].name' | \
while read -r repo; do
gh repo clone org/$repo /tmp/$repo -- --depth=1
cd /tmp/$repo
npm audit --json > /tmp/audit-$repo.json
done`)
// Apply patches
Bash(`for repo in /tmp/audit-*.json; do
if [ $(jq '.vulnerabilities | length' $repo) -gt 0 ]; then
cd /tmp/$(basename $repo .json | sed 's/audit-//')
npm audit fix
if npm test; then
git checkout -b security/patch-$(date +%Y%m%d)
git add -A
git commit -m "security: Apply security patches"
git push origin HEAD
gh pr create --title "Security patches" --label "security"
fi
fi
done`)
```
## Configuration
### Multi-Repo Config File
```yaml
# .swarm/multi-repo.yml
version: 1
organization: my-org
repositories:
- name: frontend
url: github.com/my-org/frontend
role: ui
agents: [coder, designer, tester]
- name: backend
url: github.com/my-org/backend
role: api
agents: [architect, coder, tester]
- name: shared
url: github.com/my-org/shared
role: library
agents: [analyst, coder]
coordination:
topology: hierarchical
communication: webhook
memory: redis://shared-memory
dependencies:
- from: frontend
to: [backend, shared]
- from: backend
to: [shared]
```
### Repository Roles
```javascript
{
"roles": {
"ui": {
"responsibilities": ["user-interface", "ux", "accessibility"],
"default-agents": ["designer", "coder", "tester"]
},
"api": {
"responsibilities": ["endpoints", "business-logic", "data"],
"default-agents": ["architect", "coder", "security"]
},
"library": {
"responsibilities": ["shared-code", "utilities", "types"],
"default-agents": ["analyst", "coder", "documenter"]
}
}
}
```
## Communication Strategies
### 1. Webhook-Based Coordination
```javascript
const { MultiRepoSwarm } = require('ruv-swarm');
const swarm = new MultiRepoSwarm({
webhook: {
url: 'https://swarm-coordinator.example.com',
secret: process.env.WEBHOOK_SECRET
}
});
swarm.on('repo:update', async (event) => {
await swarm.propagate(event, {
to: event.dependencies,
strategy: 'eventual-consistency'
});
});
```
### 2. Event Streaming
```yaml
# Kafka configuration for real-time coordination
kafka:
brokers: ['kafka1:9092', 'kafka2:9092']
topics:
swarm-events:
partitions: 10
replication: 3
swarm-memory:
partitions: 5
replication: 3
```
## Synchronization Patterns
### 1. Eventually Consistent
```javascript
{
"sync": {
"strategy": "eventual",
"max-lag": "5m",
"retry": {
"attempts": 3,
"backoff": "exponential"
}
}
}
```
### 2. Strong Consistency
```javascript
{
"sync": {
"strategy": "strong",
"consensus": "raft",
"quorum": 0.51,
"timeout": "30s"
}
}
```
### 3. Hybrid Approach
```javascript
{
"sync": {
"default": "eventual",
"overrides": {
"security-updates": "strong",
"dependency-updates": "strong",
"documentation": "eventual"
}
}
}
```
## Use Cases
### 1. Microservices Coordination
```bash
npx Codex-flow skill run github-multi-repo microservices \
--services "auth,users,orders,payments" \
--ensure-compatibility \
--sync-contracts \
--integration-tests
```
### 2. Library Updates
```bash
npx Codex-flow skill run github-multi-repo lib-update \
--library "org/shared-lib" \
--version "2.0.0" \
--find-consumers \
--update-imports \
--run-tests
```
### 3. Organization-Wide Changes
```bash
npx Codex-flow skill run github-multi-repo org-policy \
--policy "add-security-headers" \
--repos "org/*" \
--validate-compliance \
--create-reports
```
## Architecture Patterns
### Monorepo Structure
```
ruv-FANN/
├── packages/
│ ├── Codex-flow/
│ │ ├── src/
│ │ ├── .Codex/
│ │ └── package.json
│ ├── ruv-swarm/
│ │ ├── src/
│ │ ├── wasm/
│ │ └── package.json
│ └── shared/
│ ├── types/
│ ├── utils/
│ └── config/
├── tools/
│ ├── build/
│ ├── test/
│ └── deploy/
├── docs/
│ ├── architecture/
│ ├── integration/
│ └── examples/
└── .github/
├── workflows/
├── templates/
└── actions/
```
### Command Structure
```
.Codex/
├── commands/
│ ├── github/
│ │ ├── github-modes.md
│ │ ├── pr-manager.md
│ │ ├── issue-tracker.md
│ │ └── sync-coordinator.md
│ ├── sparc/
│ │ ├── sparc-modes.md
│ │ ├── coder.md
│ │ └── tester.md
│ └── swarm/
│ ├── coordination.md
│ └── orchestration.md
├── templates/
│ ├── issue.md
│ ├── pr.md
│ └── project.md
└── config.json
```
## Monitoring & Visualization
### Multi-Repo Dashboard
```bash
npx Codex-flow skill run github-multi-repo dashboard \
--port 3000 \
--metrics "agent-activity,task-progress,memory-usage" \
--real-time
```
### Dependency Graph
```bash
npx Codex-flow skill run github-multi-repo dep-graph \
--format mermaid \
--include-agents \
--show-data-flow
```
### Health Monitoring
```bash
npx Codex-flow skill run github-multi-repo health-check \
--repos "org/*" \
--check "connectivity,memory,agents" \
--alert-on-issues
```
## Best Practices
### 1. Repository Organization
- Clear repository roles and boundaries
- Consistent naming conventions
- Documented dependencies
- Shared configuration standards
### 2. Communication
- Use appropriate sync strategies
- Implement circuit breakers
- Monitor latency and failures
- Clear error propagation
### 3. Security
- Secure cross-repo authentication
- Encrypted communication channels
- Audit trail for all operations
- Principle of least privilege
### 4. Version Management
- Semantic versioning alignment
- Dependency compatibility validation
- Automated version bump coordination
### 5. Testing Integration
- Cross-package test validation
- Integration test automation
- Performance regression detection
## Performance Optimization
### Caching Strategy
```bash
npx Codex-flow skill run github-multi-repo cache-strategy \
--analyze-patterns \
--suggest-cache-layers \
--implement-invalidation
```
### Parallel Execution
```bash
npx Codex-flow skill run github-multi-repo parallel-optimize \
--analyze-dependencies \
--identify-parallelizable \
--execute-optimal
```
### Resource Pooling
```bash
npx Codex-flow skill run github-multi-repo resource-pool \
--share-agents \
--distribute-load \
--monitor-usage
```
## Troubleshooting
### Connectivity Issues
```bash
npx Codex-flow skill run github-multi-repo diagnose-connectivity \
--test-all-repos \
--check-permissions \
--verify-webhooks
```
### Memory Synchronization
```bash
npx Codex-flow skill run github-multi-repo debug-memory \
--check-consistency \
--identify-conflicts \
--repair-state
```
### Performance Bottlenecks
```bash
npx Codex-flow skill run github-multi-repo perf-analysis \
--profile-operations \
--identify-bottlenecks \
--suggest-optimizations
```
## Advanced Features
### 1. Distributed Task Queue
```bash
npx Codex-flow skill run github-multi-repo queue \
--backend redis \
--workers 10 \
--priority-routing \
--dead-letter-queue
```
### 2. Cross-Repo Testing
```bash
npx Codex-flow skill run github-multi-repo test \
--setup-test-env \
--link-services \
--run-e2e \
--tear-down
```
### 3. Monorepo Migration
```bash
npx Codex-flow skill run github-multi-repo to-monorepo \
--analyze-repos \
--suggest-structure \
--preserve-history \
--create-migration-prs
```
## Examples
### Full-Stack Application Update
```bash
npx Codex-flow skill run github-multi-repo fullstack-update \
--frontend "org/web-app" \
--backend "org/api-server" \
--database "org/db-migrations" \
--coordinate-deployment
```
### Cross-Team Collaboration
```bash
npx Codex-flow skill run github-multi-repo cross-team \
--teams "frontend,backend,devops" \
--task "implement-feature-x" \
--assign-by-expertise \
--track-progress
```
## Metrics and Reporting
### Sync Quality Metrics
- Package version alignment percentage
- Documentation consistency score
- Integration test success rate
- Synchronization completion time
### Architecture Health Metrics
- Repository structure consistency score
- Documentation coverage percentage
- Cross-repository integration success rate
- Template adoption and usage statistics
### Automated Reporting
- Weekly sync status reports
- Dependency drift detection
- Documentation divergence alerts
- Integration health monitoring
## Integration Points
### Related Skills
- `github-workflow` - GitHub workflow automation
- `github-pr` - Pull request management
- `sparc-architect` - Architecture design
- `sparc-optimizer` - Performance optimization
### Related Commands
- `/github sync-coordinator` - Cross-repo synchronization
- `/github release-manager` - Coordinated releases
- `/github repo-architect` - Repository optimization
- `/sparc architect` - Detailed architecture design
## Support and Resources
- Documentation: https://github.com/ruvnet/Codex-flow
- Issues: https://github.com/ruvnet/Codex-flow/issues
- Examples: `.Codex/examples/github-multi-repo/`
---
**Version:** 1.0.0
**Last Updated:** 2025-10-19
**Maintainer:** Codex Flow Team

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,446 @@
---
name: "ReasoningBank with AgentDB"
description: "Implement ReasoningBank adaptive learning with AgentDB's 150x faster vector database. Includes trajectory tracking, verdict judgment, memory distillation, and pattern recognition. Use when building self-learning agents, optimizing decision-making, or implementing experience replay systems."
---
# ReasoningBank with AgentDB
## What This Skill Does
Provides ReasoningBank adaptive learning patterns using AgentDB's high-performance backend (150x-12,500x faster). Enables agents to learn from experiences, judge outcomes, distill memories, and improve decision-making over time with 100% backward compatibility.
**Performance**: 150x faster pattern retrieval, 500x faster batch operations, <1ms memory access.
## Prerequisites
- Node.js 18+
- AgentDB v1.0.7+ (via agentic-flow)
- Understanding of reinforcement learning concepts (optional)
---
## Quick Start with CLI
### Initialize ReasoningBank Database
```bash
# Initialize AgentDB for ReasoningBank
npx agentdb@latest init ./.agentdb/reasoningbank.db --dimension 1536
# Start MCP server for Codex integration
npx agentdb@latest mcp
Codex mcp add agentdb npx agentdb@latest mcp
```
### Migrate from Legacy ReasoningBank
```bash
# Automatic migration with validation
npx agentdb@latest migrate --source .swarm/memory.db
# Verify migration
npx agentdb@latest stats ./.agentdb/reasoningbank.db
```
---
## Quick Start with API
```typescript
import { createAgentDBAdapter, computeEmbedding } from 'agentic-flow/reasoningbank';
// Initialize ReasoningBank with AgentDB
const rb = await createAgentDBAdapter({
dbPath: '.agentdb/reasoningbank.db',
enableLearning: true, // Enable learning plugins
enableReasoning: true, // Enable reasoning agents
cacheSize: 1000, // 1000 pattern cache
});
// Store successful experience
const query = "How to optimize database queries?";
const embedding = await computeEmbedding(query);
await rb.insertPattern({
id: '',
type: 'experience',
domain: 'database-optimization',
pattern_data: JSON.stringify({
embedding,
pattern: {
query,
approach: 'indexing + query optimization',
outcome: 'success',
metrics: { latency_reduction: 0.85 }
}
}),
confidence: 0.95,
usage_count: 1,
success_count: 1,
created_at: Date.now(),
last_used: Date.now(),
});
// Retrieve similar experiences with reasoning
const result = await rb.retrieveWithReasoning(embedding, {
domain: 'database-optimization',
k: 5,
useMMR: true, // Diverse results
synthesizeContext: true, // Rich context synthesis
});
console.log('Memories:', result.memories);
console.log('Context:', result.context);
console.log('Patterns:', result.patterns);
```
---
## Core ReasoningBank Concepts
### 1. Trajectory Tracking
Track agent execution paths and outcomes:
```typescript
// Record trajectory (sequence of actions)
const trajectory = {
task: 'optimize-api-endpoint',
steps: [
{ action: 'analyze-bottleneck', result: 'found N+1 query' },
{ action: 'add-eager-loading', result: 'reduced queries' },
{ action: 'add-caching', result: 'improved latency' }
],
outcome: 'success',
metrics: { latency_before: 2500, latency_after: 150 }
};
const embedding = await computeEmbedding(JSON.stringify(trajectory));
await rb.insertPattern({
id: '',
type: 'trajectory',
domain: 'api-optimization',
pattern_data: JSON.stringify({ embedding, pattern: trajectory }),
confidence: 0.9,
usage_count: 1,
success_count: 1,
created_at: Date.now(),
last_used: Date.now(),
});
```
### 2. Verdict Judgment
Judge whether a trajectory was successful:
```typescript
// Retrieve similar past trajectories
const similar = await rb.retrieveWithReasoning(queryEmbedding, {
domain: 'api-optimization',
k: 10,
});
// Judge based on similarity to successful patterns
const verdict = similar.memories.filter(m =>
m.pattern.outcome === 'success' &&
m.similarity > 0.8
).length > 5 ? 'likely_success' : 'needs_review';
console.log('Verdict:', verdict);
console.log('Confidence:', similar.memories[0]?.similarity || 0);
```
### 3. Memory Distillation
Consolidate similar experiences into patterns:
```typescript
// Get all experiences in domain
const experiences = await rb.retrieveWithReasoning(embedding, {
domain: 'api-optimization',
k: 100,
optimizeMemory: true, // Automatic consolidation
});
// Distill into high-level pattern
const distilledPattern = {
domain: 'api-optimization',
pattern: 'For N+1 queries: add eager loading, then cache',
success_rate: 0.92,
sample_size: experiences.memories.length,
confidence: 0.95
};
await rb.insertPattern({
id: '',
type: 'distilled-pattern',
domain: 'api-optimization',
pattern_data: JSON.stringify({
embedding: await computeEmbedding(JSON.stringify(distilledPattern)),
pattern: distilledPattern
}),
confidence: 0.95,
usage_count: 0,
success_count: 0,
created_at: Date.now(),
last_used: Date.now(),
});
```
---
## Integration with Reasoning Agents
AgentDB provides 4 reasoning modules that enhance ReasoningBank:
### 1. PatternMatcher
Find similar successful patterns:
```typescript
const result = await rb.retrieveWithReasoning(queryEmbedding, {
domain: 'problem-solving',
k: 10,
useMMR: true, // Maximal Marginal Relevance for diversity
});
// PatternMatcher returns diverse, relevant memories
result.memories.forEach(mem => {
console.log(`Pattern: ${mem.pattern.approach}`);
console.log(`Similarity: ${mem.similarity}`);
console.log(`Success Rate: ${mem.success_count / mem.usage_count}`);
});
```
### 2. ContextSynthesizer
Generate rich context from multiple memories:
```typescript
const result = await rb.retrieveWithReasoning(queryEmbedding, {
domain: 'code-optimization',
synthesizeContext: true, // Enable context synthesis
k: 5,
});
// ContextSynthesizer creates coherent narrative
console.log('Synthesized Context:', result.context);
// "Based on 5 similar optimizations, the most effective approach
// involves profiling, identifying bottlenecks, and applying targeted
// improvements. Success rate: 87%"
```
### 3. MemoryOptimizer
Automatically consolidate and prune:
```typescript
const result = await rb.retrieveWithReasoning(queryEmbedding, {
domain: 'testing',
optimizeMemory: true, // Enable automatic optimization
});
// MemoryOptimizer consolidates similar patterns and prunes low-quality
console.log('Optimizations:', result.optimizations);
// { consolidated: 15, pruned: 3, improved_quality: 0.12 }
```
### 4. ExperienceCurator
Filter by quality and relevance:
```typescript
const result = await rb.retrieveWithReasoning(queryEmbedding, {
domain: 'debugging',
k: 20,
minConfidence: 0.8, // Only high-confidence experiences
});
// ExperienceCurator returns only quality experiences
result.memories.forEach(mem => {
console.log(`Confidence: ${mem.confidence}`);
console.log(`Success Rate: ${mem.success_count / mem.usage_count}`);
});
```
---
## Legacy API Compatibility
AgentDB maintains 100% backward compatibility with legacy ReasoningBank:
```typescript
import {
retrieveMemories,
judgeTrajectory,
distillMemories
} from 'agentic-flow/reasoningbank';
// Legacy API works unchanged (uses AgentDB backend automatically)
const memories = await retrieveMemories(query, {
domain: 'code-generation',
agent: 'coder'
});
const verdict = await judgeTrajectory(trajectory, query);
const newMemories = await distillMemories(
trajectory,
verdict,
query,
{ domain: 'code-generation' }
);
```
---
## Performance Characteristics
- **Pattern Search**: 150x faster (100µs vs 15ms)
- **Memory Retrieval**: <1ms (with cache)
- **Batch Insert**: 500x faster (2ms vs 1s for 100 patterns)
- **Trajectory Judgment**: <5ms (including retrieval + analysis)
- **Memory Distillation**: <50ms (consolidate 100 patterns)
---
## Advanced Patterns
### Hierarchical Memory
Organize memories by abstraction level:
```typescript
// Low-level: Specific implementation
await rb.insertPattern({
type: 'concrete',
domain: 'debugging/null-pointer',
pattern_data: JSON.stringify({
embedding,
pattern: { bug: 'NPE in UserService.getUser()', fix: 'Add null check' }
}),
confidence: 0.9,
// ...
});
// Mid-level: Pattern across similar cases
await rb.insertPattern({
type: 'pattern',
domain: 'debugging',
pattern_data: JSON.stringify({
embedding,
pattern: { category: 'null-pointer', approach: 'defensive-checks' }
}),
confidence: 0.85,
// ...
});
// High-level: General principle
await rb.insertPattern({
type: 'principle',
domain: 'software-engineering',
pattern_data: JSON.stringify({
embedding,
pattern: { principle: 'fail-fast with clear errors' }
}),
confidence: 0.95,
// ...
});
```
### Multi-Domain Learning
Transfer learning across domains:
```typescript
// Learn from backend optimization
const backendExperience = await rb.retrieveWithReasoning(embedding, {
domain: 'backend-optimization',
k: 10,
});
// Apply to frontend optimization
const transferredKnowledge = backendExperience.memories.map(mem => ({
...mem,
domain: 'frontend-optimization',
adapted: true,
}));
```
---
## CLI Operations
### Database Management
```bash
# Export trajectories and patterns
npx agentdb@latest export ./.agentdb/reasoningbank.db ./backup.json
# Import experiences
npx agentdb@latest import ./experiences.json
# Get statistics
npx agentdb@latest stats ./.agentdb/reasoningbank.db
# Shows: total patterns, domains, confidence distribution
```
### Migration
```bash
# Migrate from legacy ReasoningBank
npx agentdb@latest migrate --source .swarm/memory.db --target .agentdb/reasoningbank.db
# Validate migration
npx agentdb@latest stats .agentdb/reasoningbank.db
```
---
## Troubleshooting
### Issue: Migration fails
```bash
# Check source database exists
ls -la .swarm/memory.db
# Run with verbose logging
DEBUG=agentdb:* npx agentdb@latest migrate --source .swarm/memory.db
```
### Issue: Low confidence scores
```typescript
// Enable context synthesis for better quality
const result = await rb.retrieveWithReasoning(embedding, {
synthesizeContext: true,
useMMR: true,
k: 10,
});
```
### Issue: Memory growing too large
```typescript
// Enable automatic optimization
const result = await rb.retrieveWithReasoning(embedding, {
optimizeMemory: true, // Consolidates similar patterns
});
// Or manually optimize
await rb.optimize();
```
---
## Learn More
- **AgentDB Integration**: node_modules/agentic-flow/docs/AGENTDB_INTEGRATION.md
- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb
- **MCP Integration**: `npx agentdb@latest mcp`
- **Website**: https://agentdb.ruv.io
---
**Category**: Machine Learning / Reinforcement Learning
**Difficulty**: Intermediate
**Estimated Time**: 20-30 minutes

View File

@ -0,0 +1,201 @@
---
name: "ReasoningBank Intelligence"
description: "Implement adaptive learning with ReasoningBank for pattern recognition, strategy optimization, and continuous improvement. Use when building self-learning agents, optimizing workflows, or implementing meta-cognitive systems."
---
# ReasoningBank Intelligence
## What This Skill Does
Implements ReasoningBank's adaptive learning system for AI agents to learn from experience, recognize patterns, and optimize strategies over time. Enables meta-cognitive capabilities and continuous improvement.
## Prerequisites
- agentic-flow v3.0.0-alpha.1+
- AgentDB v3.0.0-alpha.10+ (for persistence)
- Node.js 18+
## Quick Start
```typescript
import { ReasoningBank } from 'agentic-flow/reasoningbank';
// Initialize ReasoningBank
const rb = new ReasoningBank({
persist: true,
learningRate: 0.1,
adapter: 'agentdb' // Use AgentDB for storage
});
// Record task outcome
await rb.recordExperience({
task: 'code_review',
approach: 'static_analysis_first',
outcome: {
success: true,
metrics: {
bugs_found: 5,
time_taken: 120,
false_positives: 1
}
},
context: {
language: 'typescript',
complexity: 'medium'
}
});
// Get optimal strategy
const strategy = await rb.recommendStrategy('code_review', {
language: 'typescript',
complexity: 'high'
});
```
## Core Features
### 1. Pattern Recognition
```typescript
// Learn patterns from data
await rb.learnPattern({
pattern: 'api_errors_increase_after_deploy',
triggers: ['deployment', 'traffic_spike'],
actions: ['rollback', 'scale_up'],
confidence: 0.85
});
// Match patterns
const matches = await rb.matchPatterns(currentSituation);
```
### 2. Strategy Optimization
```typescript
// Compare strategies
const comparison = await rb.compareStrategies('bug_fixing', [
'tdd_approach',
'debug_first',
'reproduce_then_fix'
]);
// Get best strategy
const best = comparison.strategies[0];
console.log(`Best: ${best.name} (score: ${best.score})`);
```
### 3. Continuous Learning
```typescript
// Enable auto-learning from all tasks
await rb.enableAutoLearning({
threshold: 0.7, // Only learn from high-confidence outcomes
updateFrequency: 100 // Update models every 100 experiences
});
```
## Advanced Usage
### Meta-Learning
```typescript
// Learn about learning
await rb.metaLearn({
observation: 'parallel_execution_faster_for_independent_tasks',
confidence: 0.95,
applicability: {
task_types: ['batch_processing', 'data_transformation'],
conditions: ['tasks_independent', 'io_bound']
}
});
```
### Transfer Learning
```typescript
// Apply knowledge from one domain to another
await rb.transferKnowledge({
from: 'code_review_javascript',
to: 'code_review_typescript',
similarity: 0.8
});
```
### Adaptive Agents
```typescript
// Create self-improving agent
class AdaptiveAgent {
async execute(task: Task) {
// Get optimal strategy
const strategy = await rb.recommendStrategy(task.type, task.context);
// Execute with strategy
const result = await this.executeWithStrategy(task, strategy);
// Learn from outcome
await rb.recordExperience({
task: task.type,
approach: strategy.name,
outcome: result,
context: task.context
});
return result;
}
}
```
## Integration with AgentDB
```typescript
// Persist ReasoningBank data
await rb.configure({
storage: {
type: 'agentdb',
options: {
database: './reasoning-bank.db',
enableVectorSearch: true
}
}
});
// Query learned patterns
const patterns = await rb.query({
category: 'optimization',
minConfidence: 0.8,
timeRange: { last: '30d' }
});
```
## Performance Metrics
```typescript
// Track learning effectiveness
const metrics = await rb.getMetrics();
console.log(`
Total Experiences: ${metrics.totalExperiences}
Patterns Learned: ${metrics.patternsLearned}
Strategy Success Rate: ${metrics.strategySuccessRate}
Improvement Over Time: ${metrics.improvement}
`);
```
## Best Practices
1. **Record consistently**: Log all task outcomes, not just successes
2. **Provide context**: Rich context improves pattern matching
3. **Set thresholds**: Filter low-confidence learnings
4. **Review periodically**: Audit learned patterns for quality
5. **Use vector search**: Enable semantic pattern matching
## Troubleshooting
### Issue: Poor recommendations
**Solution**: Ensure sufficient training data (100+ experiences per task type)
### Issue: Slow pattern matching
**Solution**: Enable vector indexing in AgentDB
### Issue: Memory growing large
**Solution**: Set TTL for old experiences or enable pruning
## Learn More
- ReasoningBank Guide: agentic-flow/src/reasoningbank/README.md
- AgentDB Integration: packages/agentdb/docs/reasoningbank.md
- Pattern Learning: docs/reasoning/patterns.md

View File

@ -0,0 +1,910 @@
---
name: "Skill Builder"
description: "Create new Codex Skills with proper YAML frontmatter, progressive disclosure structure, and complete directory organization. Use when you need to build custom skills for specific workflows, generate skill templates, or understand the Codex Skills specification."
---
# Skill Builder
## What This Skill Does
Creates production-ready Codex Skills with proper YAML frontmatter, progressive disclosure architecture, and complete file/folder structure. This skill guides you through building skills that Codex can autonomously discover and use across all surfaces (Codex.ai, Codex, SDK, API).
## Prerequisites
- Codex 2.0+ or Codex.ai with Skills support
- Basic understanding of Markdown and YAML
- Text editor or IDE
## Quick Start
### Creating Your First Skill
```bash
# 1. Create skill directory (MUST be at top level, NOT in subdirectories!)
mkdir -p ~/.Codex/skills/my-first-skill
# 2. Create SKILL.md with proper format
cat > ~/.Codex/skills/my-first-skill/SKILL.md << 'EOF'
---
name: "My First Skill"
description: "Brief description of what this skill does and when Codex should use it. Maximum 1024 characters."
---
# My First Skill
## What This Skill Does
[Your instructions here]
## Quick Start
[Basic usage]
EOF
# 3. Verify skill is detected
# Restart Codex or refresh Codex.ai
```
---
## Complete Specification
### 📋 YAML Frontmatter (REQUIRED)
Every SKILL.md **must** start with YAML frontmatter containing exactly two required fields:
```yaml
---
name: "Skill Name"                                            # REQUIRED: Max 64 chars
description: "What this skill does and when Codex should use it."  # REQUIRED: Max 1024 chars; include BOTH what & when
---
```
#### Field Requirements
**`name`** (REQUIRED):
- **Type**: String
- **Max Length**: 64 characters
- **Format**: Human-friendly display name
- **Usage**: Shown in skill lists, UI, and loaded into Codex's system prompt
- **Best Practice**: Use Title Case, be concise and descriptive
- **Examples**:
- ✅ "API Documentation Generator"
- ✅ "React Component Builder"
- ✅ "Database Schema Designer"
- ❌ "skill-1" (not descriptive)
- ❌ "This is a very long skill name that exceeds sixty-four characters" (too long)
**`description`** (REQUIRED):
- **Type**: String
- **Max Length**: 1024 characters
- **Format**: Plain text or minimal markdown
- **Content**: MUST include:
1. **What** the skill does (functionality)
2. **When** Codex should invoke it (trigger conditions)
- **Usage**: Loaded into Codex's system prompt for autonomous matching
- **Best Practice**: Front-load key trigger words, be specific about use cases
- **Examples**:
- ✅ "Generate OpenAPI 3.0 documentation from Express.js routes. Use when creating API docs, documenting endpoints, or building API specifications."
- ✅ "Create React functional components with TypeScript, hooks, and tests. Use when scaffolding new components or converting class components."
- ❌ "A comprehensive guide to API documentation" (no "when" clause)
- ❌ "Documentation tool" (too vague)
#### YAML Formatting Rules
```yaml
---
# ✅ CORRECT: Simple string
name: "API Builder"
description: "Creates REST APIs with Express and TypeScript."
# ✅ CORRECT: Multi-line description (YAML folded scalar)
name: "Full-Stack Generator"
description: >-
  Generates full-stack applications with React frontend and Node.js backend.
  Use when starting new projects or scaffolding applications.
# ✅ CORRECT: Special characters quoted
name: "JSON:API Builder"
description: "Creates JSON:API compliant endpoints: pagination, filtering, relationships."
# ❌ WRONG: Missing quotes with special chars
name: API:Builder # YAML parse error!
# ❌ WRONG: Extra fields (ignored but discouraged)
name: "My Skill"
description: "My description"
version: "1.0.0" # NOT part of spec
author: "Me" # NOT part of spec
tags: ["dev", "api"] # NOT part of spec
---
```
**Critical**: Only `name` and `description` are used by Codex. Additional fields are ignored.
---
### 📂 Directory Structure
#### Minimal Skill (Required)
```
~/.Codex/skills/ # Personal skills location
└── my-skill/ # Skill directory (MUST be at top level!)
└── SKILL.md # REQUIRED: Main skill file
```
**IMPORTANT**: Skills MUST be directly under `~/.Codex/skills/[skill-name]/`.
Codex does NOT support nested subdirectories or namespaces!
#### Full-Featured Skill (Recommended)
```
~/.Codex/skills/
└── my-skill/ # Top-level skill directory
├── SKILL.md # REQUIRED: Main skill file
├── README.md # Optional: Human-readable docs
├── scripts/ # Optional: Executable scripts
│ ├── setup.sh
│ ├── validate.js
│ └── deploy.py
├── resources/ # Optional: Supporting files
│ ├── templates/
│ │ ├── api-template.js
│ │ └── component.tsx
│ ├── examples/
│ │ └── sample-output.json
│ └── schemas/
│ └── config-schema.json
└── docs/ # Optional: Additional documentation
├── ADVANCED.md
├── TROUBLESHOOTING.md
└── API_REFERENCE.md
```
#### Skills Locations
**Personal Skills** (available across all projects):
```
~/.Codex/skills/
└── [your-skills]/
```
- **Path**: `~/.Codex/skills/` or `$HOME/.Codex/skills/`
- **Scope**: Available in all projects for this user
- **Version Control**: NOT committed to git (outside repo)
- **Use Case**: Personal productivity tools, custom workflows
**Project Skills** (team-shared, version controlled):
```
<project-root>/.Codex/skills/
└── [team-skills]/
```
- **Path**: `.Codex/skills/` in project root
- **Scope**: Available only in this project
- **Version Control**: SHOULD be committed to git
- **Use Case**: Team workflows, project-specific tools, shared knowledge
---
### 🎯 Progressive Disclosure Architecture
Codex uses a **3-level progressive disclosure system** to scale to 100+ skills without context penalty:
#### Level 1: Metadata (Name + Description)
**Loaded**: At Codex startup, always
**Size**: ~200 chars per skill
**Purpose**: Enable autonomous skill matching
**Context**: Loaded into system prompt for ALL skills
```yaml
---
name: "API Builder" # 11 chars
description: "Creates REST APIs..." # ~50 chars
---
# Total: ~61 chars per skill
# 100 skills = ~6KB context (minimal!)
```
#### Level 2: SKILL.md Body
**Loaded**: When skill is triggered/matched
**Size**: ~1-10KB typically
**Purpose**: Main instructions and procedures
**Context**: Only loaded for ACTIVE skills
```markdown
# API Builder
## What This Skill Does
[Main instructions - loaded only when skill is active]
## Quick Start
[Basic procedures]
## Step-by-Step Guide
[Detailed instructions]
```
#### Level 3+: Referenced Files
**Loaded**: On-demand as Codex navigates
**Size**: Variable (KB to MB)
**Purpose**: Deep reference, examples, schemas
**Context**: Loaded only when Codex accesses specific files
```markdown
# In SKILL.md
See [Advanced Configuration](docs/ADVANCED.md) for complex scenarios.
See [API Reference](docs/API_REFERENCE.md) for complete documentation.
Use template: `resources/templates/api-template.js`
# Codex will load these files ONLY if needed
```
**Benefit**: Install 100+ skills with ~6KB context. Only active skill content (1-10KB) enters context.
---
### 📝 SKILL.md Content Structure
#### Recommended 4-Level Structure
```markdown
---
name: "Your Skill Name"
description: "What it does and when to use it"
---
# Your Skill Name
## Level 1: Overview (Always Read First)
Brief 2-3 sentence description of the skill.
## Prerequisites
- Requirement 1
- Requirement 2
## What This Skill Does
1. Primary function
2. Secondary function
3. Key benefit
---
## Level 2: Quick Start (For Fast Onboarding)
### Basic Usage
```bash
# Simplest use case
command --option value
```
### Common Scenarios
1. **Scenario 1**: How to...
2. **Scenario 2**: How to...
---
## Level 3: Detailed Instructions (For Deep Work)
### Step-by-Step Guide
#### Step 1: Initial Setup
```bash
# Commands
```
Expected output:
```
Success message
```
#### Step 2: Configuration
- Configuration option 1
- Configuration option 2
#### Step 3: Execution
- Run the main command
- Verify results
### Advanced Options
#### Option 1: Custom Configuration
```bash
# Advanced usage
```
#### Option 2: Integration
```bash
# Integration steps
```
---
## Level 4: Reference (Rarely Needed)
### Troubleshooting
#### Issue: Common Problem
**Symptoms**: What you see
**Cause**: Why it happens
**Solution**: How to fix
```bash
# Fix command
```
#### Issue: Another Problem
**Solution**: Steps to resolve
### Complete API Reference
See [API_REFERENCE.md](docs/API_REFERENCE.md)
### Examples
See [examples/](resources/examples/)
### Related Skills
- [Related Skill 1](#)
- [Related Skill 2](#)
### Resources
- [External Link 1](https://example.com)
- [Documentation](https://docs.example.com)
```
---
### 🎨 Content Best Practices
#### Writing Effective Descriptions
**Front-Load Keywords**:
```yaml
# ✅ GOOD: Keywords first
description: "Generate TypeScript interfaces from JSON schema. Use when converting schemas, creating types, or building API clients."
# ❌ BAD: Keywords buried
description: "This skill helps developers who need to work with JSON schemas by providing a way to generate TypeScript interfaces."
```
**Include Trigger Conditions**:
```yaml
# ✅ GOOD: Clear "when" clause
description: "Debug React performance issues using Chrome DevTools. Use when components re-render unnecessarily, investigating slow updates, or optimizing bundle size."
# ❌ BAD: No trigger conditions
description: "Helps with React performance debugging."
```
**Be Specific**:
```yaml
# ✅ GOOD: Specific technologies
description: "Create Express.js REST endpoints with Joi validation, Swagger docs, and Jest tests. Use when building new APIs or adding endpoints."
# ❌ BAD: Too generic
description: "Build API endpoints with proper validation and testing."
```
#### Progressive Disclosure Writing
**Keep Level 1 Brief** (Overview):
```markdown
## What This Skill Does
Creates production-ready React components with TypeScript, hooks, and tests in 3 steps.
```
**Level 2 for Common Paths** (Quick Start):
```markdown
## Quick Start
```bash
# Most common use case (80% of users)
generate-component MyComponent
```
```
**Level 3 for Details** (Step-by-Step):
```markdown
## Step-by-Step Guide
### Creating a Basic Component
1. Run generator
2. Choose template
3. Customize options
[Detailed explanations]
```
**Level 4 for Edge Cases** (Reference):
```markdown
## Advanced Configuration
For complex scenarios like HOCs, render props, or custom hooks, see [ADVANCED.md](docs/ADVANCED.md).
```
---
### 🛠️ Adding Scripts and Resources
#### Scripts Directory
**Purpose**: Executable scripts that Codex can run
**Location**: `scripts/` in skill directory
**Usage**: Referenced from SKILL.md
Example:
```bash
# In skill directory
scripts/
├── setup.sh # Initialization script
├── validate.js # Validation logic
├── generate.py # Code generation
└── deploy.sh # Deployment script
```
Reference from SKILL.md:
```markdown
## Setup
Run the setup script:
```bash
./scripts/setup.sh
```
## Validation
Validate your configuration:
```bash
node scripts/validate.js config.json
```
```
#### Resources Directory
**Purpose**: Templates, examples, schemas, static files
**Location**: `resources/` in skill directory
**Usage**: Referenced or copied by scripts
Example:
```bash
resources/
├── templates/
│ ├── component.tsx.template
│ ├── test.spec.ts.template
│ └── story.stories.tsx.template
├── examples/
│ ├── basic-example/
│ ├── advanced-example/
│ └── integration-example/
└── schemas/
├── config.schema.json
└── output.schema.json
```
Reference from SKILL.md:
```markdown
## Templates
Use the component template:
```bash
cp resources/templates/component.tsx.template src/components/MyComponent.tsx
```
## Examples
See working examples in `resources/examples/`:
- `basic-example/` - Simple component
- `advanced-example/` - With hooks and context
```
---
### 🔗 File References and Navigation
Codex can navigate to referenced files automatically. Use these patterns:
#### Markdown Links
```markdown
See [Advanced Configuration](docs/ADVANCED.md) for complex scenarios.
See [Troubleshooting Guide](docs/TROUBLESHOOTING.md) if you encounter errors.
```
#### Relative File Paths
```markdown
Use the template located at `resources/templates/api-template.js`
See examples in `resources/examples/basic-usage/`
```
#### Inline File Content
```markdown
## Example Configuration
See `resources/examples/config.json`:
```json
{
"option": "value"
}
```
```
**Best Practice**: Keep SKILL.md lean (~2-5KB). Move lengthy content to separate files and reference them. Codex will load only what's needed.
---
### ✅ Validation Checklist
Before publishing a skill, verify:
**YAML Frontmatter**:
- [ ] Starts with `---`
- [ ] Contains `name` field (max 64 chars)
- [ ] Contains `description` field (max 1024 chars)
- [ ] Description includes "what" and "when"
- [ ] Ends with `---`
- [ ] No YAML syntax errors
**File Structure**:
- [ ] SKILL.md exists in skill directory
- [ ] Directory is DIRECTLY in `~/.Codex/skills/[skill-name]/` or `.Codex/skills/[skill-name]/`
- [ ] Uses clear, descriptive directory name
- [ ] **NO nested subdirectories** (Codex requires top-level structure)
**Content Quality**:
- [ ] Level 1 (Overview) is brief and clear
- [ ] Level 2 (Quick Start) shows common use case
- [ ] Level 3 (Details) provides step-by-step guide
- [ ] Level 4 (Reference) links to advanced content
- [ ] Examples are concrete and runnable
- [ ] Troubleshooting section addresses common issues
**Progressive Disclosure**:
- [ ] Core instructions in SKILL.md (~2-5KB)
- [ ] Advanced content in separate docs/
- [ ] Large resources in resources/ directory
- [ ] Clear navigation between levels
**Testing**:
- [ ] Skill appears in Codex's skill list
- [ ] Description triggers on relevant queries
- [ ] Instructions are clear and actionable
- [ ] Scripts execute successfully (if included)
- [ ] Examples work as documented
---
## Skill Builder Templates
### Template 1: Basic Skill (Minimal)
```markdown
---
name: "My Basic Skill"
description: "One sentence what. One sentence when to use."
---
# My Basic Skill
## What This Skill Does
[2-3 sentences describing functionality]
## Quick Start
```bash
# Single command to get started
```
## Step-by-Step Guide
### Step 1: Setup
[Instructions]
### Step 2: Usage
[Instructions]
### Step 3: Verify
[Instructions]
## Troubleshooting
- **Issue**: Problem description
- **Solution**: Fix description
```
### Template 2: Intermediate Skill (With Scripts)
```markdown
---
name: "My Intermediate Skill"
description: "Detailed what with key features. When to use with specific triggers: scaffolding, generating, building."
---
# My Intermediate Skill
## Prerequisites
- Requirement 1
- Requirement 2
## What This Skill Does
1. Primary function
2. Secondary function
3. Integration capability
## Quick Start
```bash
./scripts/setup.sh
./scripts/generate.sh my-project
```
## Configuration
Edit `config.json`:
```json
{
"option1": "value1",
"option2": "value2"
}
```
## Step-by-Step Guide
### Basic Usage
[Steps for 80% use case]
### Advanced Usage
[Steps for complex scenarios]
## Available Scripts
- `scripts/setup.sh` - Initial setup
- `scripts/generate.sh` - Code generation
- `scripts/validate.sh` - Validation
## Resources
- Templates: `resources/templates/`
- Examples: `resources/examples/`
## Troubleshooting
[Common issues and solutions]
```
### Template 3: Advanced Skill (Full-Featured)
```markdown
---
name: "My Advanced Skill"
description: "Comprehensive what with all features and integrations. Use when [trigger 1], [trigger 2], or [trigger 3]. Supports [technology stack]."
---
# My Advanced Skill
## Overview
[Brief 2-3 sentence description]
## Prerequisites
- Technology 1 (version X+)
- Technology 2 (version Y+)
- API keys or credentials
## What This Skill Does
1. **Core Feature**: Description
2. **Integration**: Description
3. **Automation**: Description
---
## Quick Start (60 seconds)
### Installation
```bash
./scripts/install.sh
```
### First Use
```bash
./scripts/quickstart.sh
```
Expected output:
```
✓ Setup complete
✓ Configuration validated
→ Ready to use
```
---
## Configuration
### Basic Configuration
Edit `config.json`:
```json
{
"mode": "production",
"features": ["feature1", "feature2"]
}
```
### Advanced Configuration
See [Configuration Guide](docs/CONFIGURATION.md)
---
## Step-by-Step Guide
### 1. Initial Setup
[Detailed steps]
### 2. Core Workflow
[Main procedures]
### 3. Integration
[Integration steps]
---
## Advanced Features
### Feature 1: Custom Templates
```bash
./scripts/generate.sh --template custom
```
### Feature 2: Batch Processing
```bash
./scripts/batch.sh --input data.json
```
### Feature 3: CI/CD Integration
See [CI/CD Guide](docs/CICD.md)
---
## Scripts Reference
| Script | Purpose | Usage |
|--------|---------|-------|
| `install.sh` | Install dependencies | `./scripts/install.sh` |
| `generate.sh` | Generate code | `./scripts/generate.sh [name]` |
| `validate.sh` | Validate output | `./scripts/validate.sh` |
| `deploy.sh` | Deploy to environment | `./scripts/deploy.sh [env]` |
---
## Resources
### Templates
- `resources/templates/basic.template` - Basic template
- `resources/templates/advanced.template` - Advanced template
### Examples
- `resources/examples/basic/` - Simple example
- `resources/examples/advanced/` - Complex example
- `resources/examples/integration/` - Integration example
### Schemas
- `resources/schemas/config.schema.json` - Configuration schema
- `resources/schemas/output.schema.json` - Output validation
---
## Troubleshooting
### Issue: Installation Failed
**Symptoms**: Error during `install.sh`
**Cause**: Missing dependencies
**Solution**:
```bash
# Install prerequisites
npm install -g required-package
./scripts/install.sh --force
```
### Issue: Validation Errors
**Symptoms**: Validation script fails
**Solution**: See [Troubleshooting Guide](docs/TROUBLESHOOTING.md)
---
## API Reference
Complete API documentation: [API_REFERENCE.md](docs/API_REFERENCE.md)
## Related Skills
- [Related Skill 1](../related-skill-1/)
- [Related Skill 2](../related-skill-2/)
## Resources
- [Official Documentation](https://example.com/docs)
- [GitHub Repository](https://github.com/example/repo)
- [Community Forum](https://forum.example.com)
---
**Created**: 2025-10-19
**Category**: Advanced
**Difficulty**: Intermediate
**Estimated Time**: 15-30 minutes
```
---
## Examples from the Wild
### Example 1: Simple Documentation Skill
```markdown
---
name: "README Generator"
description: "Generate comprehensive README.md files for GitHub repositories. Use when starting new projects, documenting code, or improving existing READMEs."
---
# README Generator
## What This Skill Does
Creates well-structured README.md files with badges, installation, usage, and contribution sections.
## Quick Start
```bash
# Answer a few questions
./scripts/generate-readme.sh
# README.md created with:
# - Project title and description
# - Installation instructions
# - Usage examples
# - Contribution guidelines
```
## Customization
Edit sections in `resources/templates/sections/` before generating.
```
### Example 2: Code Generation Skill
```markdown
---
name: "React Component Generator"
description: "Generate React functional components with TypeScript, hooks, tests, and Storybook stories. Use when creating new components, scaffolding UI, or following component architecture patterns."
---
# React Component Generator
## Prerequisites
- Node.js 18+
- React 18+
- TypeScript 5+
## Quick Start
```bash
./scripts/generate-component.sh MyComponent
# Creates:
# - src/components/MyComponent/MyComponent.tsx
# - src/components/MyComponent/MyComponent.test.tsx
# - src/components/MyComponent/MyComponent.stories.tsx
# - src/components/MyComponent/index.ts
```
## Step-by-Step Guide
### 1. Run Generator
```bash
./scripts/generate-component.sh ComponentName
```
### 2. Choose Template
- Basic: Simple functional component
- With State: useState hooks
- With Context: useContext integration
- With API: Data fetching component
### 3. Customize
Edit generated files in `src/components/ComponentName/`
## Templates
See `resources/templates/` for available component templates.
```
---
## Learn More
### Official Resources
- [Anthropic Agent Skills Documentation](https://docs.Codex.com/en/docs/agents-and-tools/agent-skills)
- [GitHub Skills Repository](https://github.com/anthropics/skills)
- [Codex Documentation](https://docs.Codex.com/en/docs/Codex)
### Community
- [Skills Marketplace](https://github.com/anthropics/skills) - Browse community skills
- [Anthropic Discord](https://discord.gg/anthropic) - Get help from community
### Advanced Topics
- Multi-file skills with complex navigation
- Skills that spawn other skills
- Integration with MCP tools
- Dynamic skill generation
---
**Created**: 2025-10-19
**Version**: 1.0.0
**Maintained By**: agentic-flow team
**License**: MIT

View File

@ -0,0 +1,144 @@
---
name: soft-delete-relogin-consistency
description: |
Fix for missing auth/identity records after account deletion + device re-login.
Use when: (1) User deletes account but device records are intentionally kept
(e.g., to prevent trial abuse), (2) Re-login via device succeeds but user
appears to have wrong identity type, (3) Frontend shows incorrect UI because
auth_methods or similar identity records are empty/wrong after re-login,
(4) Soft-deleted records cause stale cache entries that misrepresent user state.
Covers GORM soft-delete, device-based auth, cache invalidation after re-creation.
author: Codex
version: 1.0.0
date: 2026-03-11
---
# Soft-Delete + Re-Login Auth Consistency
## Problem
When a system uses soft-delete for auth/identity records during account deletion but
intentionally keeps primary records (like device records) for abuse prevention, re-login
flows may succeed at the "find existing record" step but fail to re-create the
soft-deleted identity records. This causes the user to exist in an inconsistent state
where they're authenticated but missing critical identity metadata.
## Context / Trigger Conditions
- Account deletion (注销) soft-deletes `auth_methods` (or equivalent identity records)
- Device/hardware records are intentionally kept to prevent trial reward abuse
- Device-based re-login finds existing device record -> reuses old user_id
- But the "device found" code path skips identity record creation (only the
"device not found" registration path creates them)
- Result: User is logged in but `auth_methods` is empty or missing the expected type
- Frontend UI breaks because it relies on `auth_methods[0].auth_type` to determine
login mode and show/hide UI elements
### Symptoms
- Buttons or UI elements that should be hidden for device-only users appear after
account deletion + re-login
- API returns user info with empty or unexpected `auth_methods` array
- `isDeviceLogin()` or similar identity checks return wrong results
- Cache returns stale user data even after re-login
## Solution
### Step 1: Identify the re-login code path
Find the "device found" branch in the login logic. This is the code path that runs
when a device record already exists (as opposed to the registration path).
### Step 2: Add identity record existence check
After finding the user via device record, check if the expected identity record exists:
```go
// After finding user via existing device record
hasDeviceAuth := false
for _, am := range userInfo.AuthMethods {
if am.AuthType == "device" && am.AuthIdentifier == req.Identifier {
hasDeviceAuth = true
break
}
}
if !hasDeviceAuth {
// Re-create the soft-deleted auth record
authMethod := &user.AuthMethods{
UserId: userInfo.Id,
AuthType: "device",
AuthIdentifier: req.Identifier,
Verified: true,
}
    if createErr := db.Create(authMethod).Error; createErr != nil {
        log.Error("re-create auth method failed", createErr)
    } else {
// CRITICAL: Clear user cache so subsequent reads return updated data
_ = userModel.ClearUserCache(ctx, userInfo)
}
}
```
### Step 3: Ensure cache invalidation
After re-creating the identity record, clear the user cache. This is critical because
cached user data (with `Preload("AuthMethods")`) will still show the old empty state
until the cache is invalidated.
### Step 4: Verify GORM soft-delete behavior
GORM's soft-delete (`deleted_at IS NULL` filter) means:
- `Preload("AuthMethods")` will NOT return soft-deleted records
- `db.Create()` will create a NEW record (not undelete the old one)
- The old soft-deleted record remains in the database (harmless)
## Verification
1. Delete account (注销)
2. Re-login via device
3. Call user info API - verify `auth_methods` contains the device type
4. Check frontend UI - verify device-specific UI state is correct
## Example
**Before fix:**
```
1. User has auth_methods: [device_A, email_A]
2. User deletes account -> auth_methods all soft-deleted
3. Device record kept (abuse prevention)
4. User re-logins via same device
5. FindOneDeviceByIdentifier finds device -> reuses user_id
6. FindOne returns user with AuthMethods=[] (soft-deleted, filtered out)
7. Frontend: isDeviceLogin() = false (no auth_methods) -> shows wrong buttons
```
**After fix:**
```
1-4. Same as above
5. FindOneDeviceByIdentifier finds device -> reuses user_id
6. FindOne returns user with AuthMethods=[]
7. NEW: Detects missing device auth_method, re-creates it, clears cache
8. Frontend: isDeviceLogin() = true -> correct UI
```
## Notes
- This pattern applies broadly to any system where:
- Account deletion removes identity records but keeps usage records
- Re-login can succeed via the usage records
- UI/business logic depends on the identity records existing
- The "don't delete device records" design is intentional for preventing abuse
(e.g., users repeatedly deleting and re-creating accounts to get trial rewards)
- Cache invalidation is the most commonly missed step - without it, the fix appears
to not work because cached data is served until TTL expires
- Consider whether `Unscoped()` (GORM) should be used to also query soft-deleted
records, or whether re-creation is the better approach (usually re-creation is
cleaner as it creates a fresh record with correct timestamps)
## Related Patterns
- **Cache key dependency chains**: When `ClearUserCache` depends on `AuthMethods`
to generate email cache keys, capture auth_methods BEFORE deletion, then explicitly
clear derived cache keys after the transaction
- **Family ownership transfer**: When an owner exits a shared resource group, transfer
ownership to a remaining member instead of dissolving the group

View File

@ -0,0 +1,563 @@
---
name: stream-chain
description: Stream-JSON chaining for multi-agent pipelines, data transformation, and sequential workflows
version: 1.0.0
category: workflow
tags: [streaming, pipeline, chaining, multi-agent, workflow]
---
# Stream-Chain Skill
Execute sophisticated multi-step workflows where each agent's output flows into the next, enabling complex data transformations and sequential processing pipelines.
## Overview
Stream-Chain provides two powerful modes for orchestrating multi-agent workflows:
1. **Custom Chains** (`run`): Execute custom prompt sequences with full control
2. **Predefined Pipelines** (`pipeline`): Use battle-tested workflows for common tasks
Each step in a chain receives the complete output from the previous step, enabling sophisticated multi-agent coordination through streaming data flow.
---
## Quick Start
### Run a Custom Chain
```bash
Codex-flow stream-chain run \
"Analyze codebase structure" \
"Identify improvement areas" \
"Generate action plan"
```
### Execute a Pipeline
```bash
Codex-flow stream-chain pipeline analysis
```
---
## Custom Chains (`run`)
Execute custom stream chains with your own prompts for maximum flexibility.
### Syntax
```bash
Codex-flow stream-chain run <prompt1> <prompt2> [...] [options]
```
**Requirements:**
- Minimum 2 prompts required
- Each prompt becomes a step in the chain
- Output flows sequentially through all steps
### Options
| Option | Description | Default |
|--------|-------------|---------|
| `--verbose` | Show detailed execution information | `false` |
| `--timeout <seconds>` | Timeout per step | `30` |
| `--debug` | Enable debug mode with full logging | `false` |
### How Context Flows
Each step receives the previous step's complete output as context:
```
Step 1: "Write a sorting function"
Output: [function implementation]
Step 2 receives:
"Previous step output:
[function implementation]
Next task: Add comprehensive tests"
Step 3 receives:
"Previous steps output:
[function + tests]
Next task: Optimize performance"
```
### Examples
#### Basic Development Chain
```bash
Codex-flow stream-chain run \
"Write a user authentication function" \
"Add input validation and error handling" \
"Create unit tests with edge cases"
```
#### Security Audit Workflow
```bash
Codex-flow stream-chain run \
"Analyze authentication system for vulnerabilities" \
"Identify and categorize security issues by severity" \
"Propose fixes with implementation priority" \
"Generate security test cases" \
--timeout 45 \
--verbose
```
#### Code Refactoring Chain
```bash
Codex-flow stream-chain run \
"Identify code smells in src/ directory" \
"Create refactoring plan with specific changes" \
"Apply refactoring to top 3 priority items" \
"Verify refactored code maintains behavior" \
--debug
```
#### Data Processing Pipeline
```bash
Codex-flow stream-chain run \
"Extract data from API responses" \
"Transform data into normalized format" \
"Validate data against schema" \
"Generate data quality report"
```
---
## Predefined Pipelines (`pipeline`)
Execute battle-tested workflows optimized for common development tasks.
### Syntax
```bash
Codex-flow stream-chain pipeline <type> [options]
```
### Available Pipelines
#### 1. Analysis Pipeline
Comprehensive codebase analysis and improvement identification.
```bash
Codex-flow stream-chain pipeline analysis
```
**Workflow Steps:**
1. **Structure Analysis**: Map directory structure and identify components
2. **Issue Detection**: Find potential improvements and problems
3. **Recommendations**: Generate actionable improvement report
**Use Cases:**
- New codebase onboarding
- Technical debt assessment
- Architecture review
- Code quality audits
#### 2. Refactor Pipeline
Systematic code refactoring with prioritization.
```bash
Codex-flow stream-chain pipeline refactor
```
**Workflow Steps:**
1. **Candidate Identification**: Find code needing refactoring
2. **Prioritization**: Create ranked refactoring plan
3. **Implementation**: Provide refactored code for top priorities
**Use Cases:**
- Technical debt reduction
- Code quality improvement
- Legacy code modernization
- Design pattern implementation
#### 3. Test Pipeline
Comprehensive test generation with coverage analysis.
```bash
Codex-flow stream-chain pipeline test
```
**Workflow Steps:**
1. **Coverage Analysis**: Identify areas lacking tests
2. **Test Design**: Create test cases for critical functions
3. **Implementation**: Generate unit tests with assertions
**Use Cases:**
- Increasing test coverage
- TDD workflow support
- Regression test creation
- Quality assurance
#### 4. Optimize Pipeline
Performance optimization with profiling and implementation.
```bash
Codex-flow stream-chain pipeline optimize
```
**Workflow Steps:**
1. **Profiling**: Identify performance bottlenecks
2. **Strategy**: Analyze and suggest optimization approaches
3. **Implementation**: Provide optimized code
**Use Cases:**
- Performance improvement
- Resource optimization
- Scalability enhancement
- Latency reduction
### Pipeline Options
| Option | Description | Default |
|--------|-------------|---------|
| `--verbose` | Show detailed execution | `false` |
| `--timeout <seconds>` | Timeout per step | `30` |
| `--debug` | Enable debug mode | `false` |
### Pipeline Examples
#### Quick Analysis
```bash
Codex-flow stream-chain pipeline analysis
```
#### Extended Refactoring
```bash
Codex-flow stream-chain pipeline refactor --timeout 60 --verbose
```
#### Debug Test Generation
```bash
Codex-flow stream-chain pipeline test --debug
```
#### Comprehensive Optimization
```bash
Codex-flow stream-chain pipeline optimize --timeout 90 --verbose
```
### Pipeline Output
Each pipeline execution provides:
- **Progress**: Step-by-step execution status
- **Results**: Success/failure per step
- **Timing**: Total and per-step execution time
- **Summary**: Consolidated results and recommendations
---
## Custom Pipeline Definitions
Define reusable pipelines in `.Codex-flow/config.json`:
### Configuration Format
```json
{
"streamChain": {
"pipelines": {
"security": {
"name": "Security Audit Pipeline",
"description": "Comprehensive security analysis",
"prompts": [
"Scan codebase for security vulnerabilities",
"Categorize issues by severity (critical/high/medium/low)",
"Generate fixes with priority and implementation steps",
"Create security test suite"
],
"timeout": 45
},
"documentation": {
"name": "Documentation Generation Pipeline",
"prompts": [
"Analyze code structure and identify undocumented areas",
"Generate API documentation with examples",
"Create usage guides and tutorials",
"Build architecture diagrams and flow charts"
]
}
}
}
}
```
### Execute Custom Pipeline
```bash
Codex-flow stream-chain pipeline security
Codex-flow stream-chain pipeline documentation
```
---
## Advanced Use Cases
### Multi-Agent Coordination
Chain different agent types for complex workflows:
```bash
Codex-flow stream-chain run \
"Research best practices for API design" \
"Design REST API with discovered patterns" \
"Implement API endpoints with validation" \
"Generate OpenAPI specification" \
"Create integration tests" \
"Write deployment documentation"
```
### Data Transformation Pipeline
Process and transform data through multiple stages:
```bash
Codex-flow stream-chain run \
"Extract user data from CSV files" \
"Normalize and validate data format" \
"Enrich data with external API calls" \
"Generate analytics report" \
"Create visualization code"
```
### Code Migration Workflow
Systematic code migration with validation:
```bash
Codex-flow stream-chain run \
"Analyze legacy codebase dependencies" \
"Create migration plan with risk assessment" \
"Generate modernized code for high-priority modules" \
"Create migration tests" \
"Document migration steps and rollback procedures"
```
### Quality Assurance Chain
Comprehensive code quality workflow:
```bash
Codex-flow stream-chain pipeline analysis
Codex-flow stream-chain pipeline refactor
Codex-flow stream-chain pipeline test
Codex-flow stream-chain pipeline optimize
```
---
## Best Practices
### 1. Clear and Specific Prompts
**Good:**
```bash
"Analyze authentication.js for SQL injection vulnerabilities"
```
**Avoid:**
```bash
"Check security"
```
### 2. Logical Progression
Order prompts to build on previous outputs:
```bash
1. "Identify the problem"
2. "Analyze root causes"
3. "Design solution"
4. "Implement solution"
5. "Verify implementation"
```
### 3. Appropriate Timeouts
- Simple tasks: 30 seconds (default)
- Analysis tasks: 45-60 seconds
- Implementation tasks: 60-90 seconds
- Complex workflows: 90-120 seconds
### 4. Verification Steps
Include validation in your chains:
```bash
Codex-flow stream-chain run \
"Implement feature X" \
"Write tests for feature X" \
"Verify tests pass and cover edge cases"
```
### 5. Iterative Refinement
Use chains for iterative improvement:
```bash
Codex-flow stream-chain run \
"Generate initial implementation" \
"Review and identify issues" \
"Refine based on issues found" \
"Final quality check"
```
---
## Integration with Codex Flow
### Combine with Swarm Coordination
```bash
# Initialize swarm for coordination
Codex-flow swarm init --topology mesh
# Execute stream chain with swarm agents
Codex-flow stream-chain run \
"Agent 1: Research task" \
"Agent 2: Implement solution" \
"Agent 3: Test implementation" \
"Agent 4: Review and refine"
```
### Memory Integration
Stream chains automatically store context in memory for cross-session persistence:
```bash
# Execute chain with memory
Codex-flow stream-chain run \
"Analyze requirements" \
"Design architecture" \
--verbose
# Results stored in .Codex-flow/memory/stream-chain/
```
### Neural Pattern Training
Successful chains train neural patterns for improved performance:
```bash
# Enable neural training
Codex-flow stream-chain pipeline optimize --debug
# Patterns learned and stored for future optimizations
```
---
## Troubleshooting
### Chain Timeout
If steps timeout, increase timeout value:
```bash
Codex-flow stream-chain run "complex task" --timeout 120
```
### Context Loss
If context not flowing properly, use `--debug`:
```bash
Codex-flow stream-chain run "step 1" "step 2" --debug
```
### Pipeline Not Found
Verify pipeline name and custom definitions:
```bash
# Check available pipelines
cat .Codex-flow/config.json | grep -A 10 "streamChain"
```
---
## Performance Characteristics
- **Throughput**: 2-5 steps per minute (varies by complexity)
- **Context Size**: Up to 100K tokens per step
- **Memory Usage**: ~50MB per active chain
- **Concurrency**: Supports parallel chain execution
---
## Related Skills
- **SPARC Methodology**: Systematic development workflow
- **Swarm Coordination**: Multi-agent orchestration
- **Memory Management**: Persistent context storage
- **Neural Patterns**: Adaptive learning
---
## Examples Repository
### Complete Development Workflow
```bash
# Full feature development chain
Codex-flow stream-chain run \
"Analyze requirements for user profile feature" \
"Design database schema and API endpoints" \
"Implement backend with validation" \
"Create frontend components" \
"Write comprehensive tests" \
"Generate API documentation" \
--timeout 60 \
--verbose
```
### Code Review Pipeline
```bash
# Automated code review workflow
Codex-flow stream-chain run \
"Analyze recent git changes" \
"Identify code quality issues" \
"Check for security vulnerabilities" \
"Verify test coverage" \
"Generate code review report with recommendations"
```
### Migration Assistant
```bash
# Framework migration helper
Codex-flow stream-chain run \
"Analyze current Vue 2 codebase" \
"Identify Vue 3 breaking changes" \
"Create migration checklist" \
"Generate migration scripts" \
"Provide updated code examples"
```
---
## Conclusion
Stream-Chain enables sophisticated multi-step workflows by:
- **Sequential Processing**: Each step builds on previous results
- **Context Preservation**: Full output history flows through chain
- **Flexible Orchestration**: Custom chains or predefined pipelines
- **Agent Coordination**: Natural multi-agent collaboration pattern
- **Data Transformation**: Complex processing through simple steps
Use `run` for custom workflows and `pipeline` for battle-tested solutions.

View File

@ -0,0 +1,973 @@
---
name: swarm-advanced
description: Advanced swarm orchestration patterns for research, development, testing, and complex distributed workflows
version: 2.0.0
category: orchestration
tags: [swarm, distributed, parallel, research, testing, development, coordination]
author: Codex Flow Team
---
# Advanced Swarm Orchestration
Master advanced swarm patterns for distributed research, development, and testing workflows. This skill covers comprehensive orchestration strategies using both MCP tools and CLI commands.
## Quick Start
### Prerequisites
```bash
# Ensure Codex Flow is installed
npm install -g Codex-flow@alpha
# Add MCP server (if using MCP tools)
Codex mcp add Codex-flow npx Codex-flow@alpha mcp start
```
### Basic Pattern
```javascript
// 1. Initialize swarm topology
mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 6 })
// 2. Spawn specialized agents
mcp__claude-flow__agent_spawn({ type: "researcher", name: "Agent 1" })
// 3. Orchestrate tasks
mcp__claude-flow__task_orchestrate({ task: "...", strategy: "parallel" })
```
## Core Concepts
### Swarm Topologies
**Mesh Topology** - Peer-to-peer communication, best for research and analysis
- All agents communicate directly
- High flexibility and resilience
- Use for: Research, analysis, brainstorming
**Hierarchical Topology** - Coordinator with subordinates, best for development
- Clear command structure
- Sequential workflow support
- Use for: Development, structured workflows
**Star Topology** - Central coordinator, best for testing
- Centralized control and monitoring
- Parallel execution with coordination
- Use for: Testing, validation, quality assurance
**Ring Topology** - Sequential processing chain
- Step-by-step processing
- Pipeline workflows
- Use for: Multi-stage processing, data pipelines
### Agent Strategies
**Adaptive** - Dynamic adjustment based on task complexity
**Balanced** - Equal distribution of work across agents
**Specialized** - Task-specific agent assignment
**Parallel** - Maximum concurrent execution
## Pattern 1: Research Swarm
### Purpose
Deep research through parallel information gathering, analysis, and synthesis.
### Architecture
```javascript
// Initialize research swarm
mcp__claude-flow__swarm_init({
"topology": "mesh",
"maxAgents": 6,
"strategy": "adaptive"
})
// Spawn research team
const researchAgents = [
{
type: "researcher",
name: "Web Researcher",
capabilities: ["web-search", "content-extraction", "source-validation"]
},
{
type: "researcher",
name: "Academic Researcher",
capabilities: ["paper-analysis", "citation-tracking", "literature-review"]
},
{
type: "analyst",
name: "Data Analyst",
capabilities: ["data-processing", "statistical-analysis", "visualization"]
},
{
type: "analyst",
name: "Pattern Analyzer",
capabilities: ["trend-detection", "correlation-analysis", "outlier-detection"]
},
{
type: "documenter",
name: "Report Writer",
capabilities: ["synthesis", "technical-writing", "formatting"]
}
]
// Spawn all agents
researchAgents.forEach(agent => {
mcp__claude-flow__agent_spawn({
type: agent.type,
name: agent.name,
capabilities: agent.capabilities
})
})
```
### Research Workflow
#### Phase 1: Information Gathering
```javascript
// Parallel information collection
mcp__claude-flow__parallel_execute({
"tasks": [
{
"id": "web-search",
"command": "search recent publications and articles"
},
{
"id": "academic-search",
"command": "search academic databases and papers"
},
{
"id": "data-collection",
"command": "gather relevant datasets and statistics"
},
{
"id": "expert-search",
"command": "identify domain experts and thought leaders"
}
]
})
// Store research findings in memory
mcp__claude-flow__memory_usage({
"action": "store",
"key": "research-findings-" + Date.now(),
"value": JSON.stringify(findings),
"namespace": "research",
"ttl": 604800 // 7 days
})
```
#### Phase 2: Analysis and Validation
```javascript
// Pattern recognition in findings
mcp__claude-flow__pattern_recognize({
"data": researchData,
"patterns": ["trend", "correlation", "outlier", "emerging-pattern"]
})
// Cognitive analysis
mcp__claude-flow__cognitive_analyze({
"behavior": "research-synthesis"
})
// Quality assessment
mcp__claude-flow__quality_assess({
"target": "research-sources",
"criteria": ["credibility", "relevance", "recency", "authority"]
})
// Cross-reference validation
mcp__claude-flow__neural_patterns({
"action": "analyze",
"operation": "fact-checking",
"metadata": { "sources": sourcesArray }
})
```
#### Phase 3: Knowledge Management
```javascript
// Search existing knowledge base
mcp__claude-flow__memory_search({
"pattern": "topic X",
"namespace": "research",
"limit": 20
})
// Create knowledge graph connections
mcp__claude-flow__neural_patterns({
"action": "learn",
"operation": "knowledge-graph",
"metadata": {
"topic": "X",
"connections": relatedTopics,
"depth": 3
}
})
// Store connections for future use
mcp__claude-flow__memory_usage({
"action": "store",
"key": "knowledge-graph-X",
"value": JSON.stringify(knowledgeGraph),
"namespace": "research/graphs",
"ttl": 2592000 // 30 days
})
```
#### Phase 4: Report Generation
```javascript
// Orchestrate report generation
mcp__claude-flow__task_orchestrate({
"task": "generate comprehensive research report",
"strategy": "sequential",
"priority": "high",
"dependencies": ["gather", "analyze", "validate", "synthesize"]
})
// Monitor research progress
mcp__claude-flow__swarm_status({
"swarmId": "research-swarm"
})
// Generate final report
mcp__claude-flow__workflow_execute({
"workflowId": "research-report-generation",
"params": {
"findings": findings,
"format": "comprehensive",
"sections": ["executive-summary", "methodology", "findings", "analysis", "conclusions", "references"]
}
})
```
### CLI Fallback
```bash
# Quick research swarm
npx Codex-flow swarm "research AI trends in 2025" \
--strategy research \
--mode distributed \
--max-agents 6 \
--parallel \
--output research-report.md
```
## Pattern 2: Development Swarm
### Purpose
Full-stack development through coordinated specialist agents.
### Architecture
```javascript
// Initialize development swarm with hierarchy
mcp__claude-flow__swarm_init({
"topology": "hierarchical",
"maxAgents": 8,
"strategy": "balanced"
})
// Spawn development team
const devTeam = [
{ type: "architect", name: "System Architect", role: "coordinator" },
{ type: "coder", name: "Backend Developer", capabilities: ["node", "api", "database"] },
{ type: "coder", name: "Frontend Developer", capabilities: ["react", "ui", "ux"] },
{ type: "coder", name: "Database Engineer", capabilities: ["sql", "nosql", "optimization"] },
{ type: "tester", name: "QA Engineer", capabilities: ["unit", "integration", "e2e"] },
{ type: "reviewer", name: "Code Reviewer", capabilities: ["security", "performance", "best-practices"] },
{ type: "documenter", name: "Technical Writer", capabilities: ["api-docs", "guides", "tutorials"] },
{ type: "monitor", name: "DevOps Engineer", capabilities: ["ci-cd", "deployment", "monitoring"] }
]
// Spawn all team members
devTeam.forEach(member => {
mcp__claude-flow__agent_spawn({
type: member.type,
name: member.name,
capabilities: member.capabilities,
swarmId: "dev-swarm"
})
})
```
### Development Workflow
#### Phase 1: Architecture and Design
```javascript
// System architecture design
mcp__claude-flow__task_orchestrate({
"task": "design system architecture for REST API",
"strategy": "sequential",
"priority": "critical",
"assignTo": "System Architect"
})
// Store architecture decisions
mcp__claude-flow__memory_usage({
"action": "store",
"key": "architecture-decisions",
"value": JSON.stringify(architectureDoc),
"namespace": "development/design"
})
```
#### Phase 2: Parallel Implementation
```javascript
// Parallel development tasks
mcp__claude-flow__parallel_execute({
"tasks": [
{
"id": "backend-api",
"command": "implement REST API endpoints",
"assignTo": "Backend Developer"
},
{
"id": "frontend-ui",
"command": "build user interface components",
"assignTo": "Frontend Developer"
},
{
"id": "database-schema",
"command": "design and implement database schema",
"assignTo": "Database Engineer"
},
{
"id": "api-documentation",
"command": "create API documentation",
"assignTo": "Technical Writer"
}
]
})
// Monitor development progress
mcp__claude-flow__swarm_monitor({
"swarmId": "dev-swarm",
"interval": 5000
})
```
#### Phase 3: Testing and Validation
```javascript
// Comprehensive testing
mcp__claude-flow__batch_process({
"items": [
{ type: "unit", target: "all-modules" },
{ type: "integration", target: "api-endpoints" },
{ type: "e2e", target: "user-flows" },
{ type: "performance", target: "critical-paths" }
],
"operation": "execute-tests"
})
// Quality assessment
mcp__claude-flow__quality_assess({
"target": "codebase",
"criteria": ["coverage", "complexity", "maintainability", "security"]
})
```
#### Phase 4: Review and Deployment
```javascript
// Code review workflow
mcp__claude-flow__workflow_execute({
"workflowId": "code-review-process",
"params": {
"reviewers": ["Code Reviewer"],
"criteria": ["security", "performance", "best-practices"]
}
})
// CI/CD pipeline
mcp__claude-flow__pipeline_create({
"config": {
"stages": ["build", "test", "security-scan", "deploy"],
"environment": "production"
}
})
```
### CLI Fallback
```bash
# Quick development swarm
npx Codex-flow swarm "build REST API with authentication" \
--strategy development \
--mode hierarchical \
--monitor \
--output sqlite
```
## Pattern 3: Testing Swarm
### Purpose
Comprehensive quality assurance through distributed testing.
### Architecture
```javascript
// Initialize testing swarm with star topology
mcp__claude-flow__swarm_init({
"topology": "star",
"maxAgents": 7,
"strategy": "parallel"
})
// Spawn testing team
const testingTeam = [
{
type: "tester",
name: "Unit Test Coordinator",
capabilities: ["unit-testing", "mocking", "coverage", "tdd"]
},
{
type: "tester",
name: "Integration Tester",
capabilities: ["integration", "api-testing", "contract-testing"]
},
{
type: "tester",
name: "E2E Tester",
capabilities: ["e2e", "ui-testing", "user-flows", "selenium"]
},
{
type: "tester",
name: "Performance Tester",
capabilities: ["load-testing", "stress-testing", "benchmarking"]
},
{
type: "monitor",
name: "Security Tester",
capabilities: ["security-testing", "penetration-testing", "vulnerability-scanning"]
},
{
type: "analyst",
name: "Test Analyst",
capabilities: ["coverage-analysis", "test-optimization", "reporting"]
},
{
type: "documenter",
name: "Test Documenter",
capabilities: ["test-documentation", "test-plans", "reports"]
}
]
// Spawn all testers
testingTeam.forEach(tester => {
mcp__claude-flow__agent_spawn({
type: tester.type,
name: tester.name,
capabilities: tester.capabilities,
swarmId: "testing-swarm"
})
})
```
### Testing Workflow
#### Phase 1: Test Planning
```javascript
// Analyze test coverage requirements
mcp__claude-flow__quality_assess({
"target": "test-coverage",
"criteria": [
"line-coverage",
"branch-coverage",
"function-coverage",
"edge-cases"
]
})
// Identify test scenarios
mcp__claude-flow__pattern_recognize({
"data": testScenarios,
"patterns": [
"edge-case",
"boundary-condition",
"error-path",
"happy-path"
]
})
// Store test plan
mcp__claude-flow__memory_usage({
"action": "store",
"key": "test-plan-" + Date.now(),
"value": JSON.stringify(testPlan),
"namespace": "testing/plans"
})
```
#### Phase 2: Parallel Test Execution
```javascript
// Execute all test suites in parallel
mcp__claude-flow__parallel_execute({
"tasks": [
{
"id": "unit-tests",
"command": "npm run test:unit",
"assignTo": "Unit Test Coordinator"
},
{
"id": "integration-tests",
"command": "npm run test:integration",
"assignTo": "Integration Tester"
},
{
"id": "e2e-tests",
"command": "npm run test:e2e",
"assignTo": "E2E Tester"
},
{
"id": "performance-tests",
"command": "npm run test:performance",
"assignTo": "Performance Tester"
},
{
"id": "security-tests",
"command": "npm run test:security",
"assignTo": "Security Tester"
}
]
})
// Batch process test suites
mcp__claude-flow__batch_process({
"items": testSuites,
"operation": "execute-test-suite"
})
```
#### Phase 3: Performance and Security
```javascript
// Run performance benchmarks
mcp__claude-flow__benchmark_run({
"suite": "comprehensive-performance"
})
// Bottleneck analysis
mcp__claude-flow__bottleneck_analyze({
"component": "application",
"metrics": ["response-time", "throughput", "memory", "cpu"]
})
// Security scanning
mcp__claude-flow__security_scan({
"target": "application",
"depth": "comprehensive"
})
// Vulnerability analysis
mcp__claude-flow__error_analysis({
"logs": securityScanLogs
})
```
#### Phase 4: Monitoring and Reporting
```javascript
// Real-time test monitoring
mcp__claude-flow__swarm_monitor({
"swarmId": "testing-swarm",
"interval": 2000
})
// Generate comprehensive test report
mcp__claude-flow__performance_report({
"format": "detailed",
"timeframe": "current-run"
})
// Get test results
mcp__claude-flow__task_results({
"taskId": "test-execution-001"
})
// Trend analysis
mcp__claude-flow__trend_analysis({
"metric": "test-coverage",
"period": "30d"
})
```
### CLI Fallback
```bash
# Quick testing swarm
npx Codex-flow swarm "test application comprehensively" \
--strategy testing \
--mode star \
--parallel \
--timeout 600
```
## Pattern 4: Analysis Swarm
### Purpose
Deep code and system analysis through specialized analyzers.
### Architecture
```javascript
// Initialize analysis swarm
mcp__claude-flow__swarm_init({
"topology": "mesh",
"maxAgents": 5,
"strategy": "adaptive"
})
// Spawn analysis specialists
const analysisTeam = [
{
type: "analyst",
name: "Code Analyzer",
capabilities: ["static-analysis", "complexity-analysis", "dead-code-detection"]
},
{
type: "analyst",
name: "Security Analyzer",
capabilities: ["security-scan", "vulnerability-detection", "dependency-audit"]
},
{
type: "analyst",
name: "Performance Analyzer",
capabilities: ["profiling", "bottleneck-detection", "optimization"]
},
{
type: "analyst",
name: "Architecture Analyzer",
capabilities: ["dependency-analysis", "coupling-detection", "modularity-assessment"]
},
{
type: "documenter",
name: "Analysis Reporter",
capabilities: ["reporting", "visualization", "recommendations"]
}
]
// Spawn all analysts
analysisTeam.forEach(analyst => {
mcp__claude-flow__agent_spawn({
type: analyst.type,
name: analyst.name,
capabilities: analyst.capabilities
})
})
```
### Analysis Workflow
```javascript
// Parallel analysis execution
mcp__claude-flow__parallel_execute({
"tasks": [
{ "id": "analyze-code", "command": "analyze codebase structure and quality" },
{ "id": "analyze-security", "command": "scan for security vulnerabilities" },
{ "id": "analyze-performance", "command": "identify performance bottlenecks" },
{ "id": "analyze-architecture", "command": "assess architectural patterns" }
]
})
// Generate comprehensive analysis report
mcp__claude-flow__performance_report({
"format": "detailed",
"timeframe": "current"
})
// Cost analysis
mcp__claude-flow__cost_analysis({
"timeframe": "30d"
})
```
## Advanced Techniques
### Error Handling and Fault Tolerance
```javascript
// Set up fault tolerance for all agents
mcp__claude-flow__daa_fault_tolerance({
"agentId": "all",
"strategy": "auto-recovery"
})
// Error handling pattern
try {
await mcp__claude-flow__task_orchestrate({
"task": "complex operation",
"strategy": "parallel",
"priority": "high"
})
} catch (error) {
// Check swarm health
const status = await mcp__claude-flow__swarm_status({})
// Analyze error patterns
await mcp__claude-flow__error_analysis({
"logs": [error.message]
})
// Auto-recovery attempt
if (status.healthy) {
await mcp__claude-flow__task_orchestrate({
"task": "retry failed operation",
"strategy": "sequential"
})
}
}
```
### Memory and State Management
```javascript
// Cross-session persistence
mcp__claude-flow__memory_persist({
"sessionId": "swarm-session-001"
})
// Namespace management for different swarms
mcp__claude-flow__memory_namespace({
"namespace": "research-swarm",
"action": "create"
})
// Create state snapshot
mcp__claude-flow__state_snapshot({
"name": "development-checkpoint-1"
})
// Restore from snapshot if needed
mcp__claude-flow__context_restore({
"snapshotId": "development-checkpoint-1"
})
// Backup memory stores
mcp__claude-flow__memory_backup({
"path": "/workspaces/Codex-flow/backups/swarm-memory.json"
})
```
### Neural Pattern Learning
```javascript
// Train neural patterns from successful workflows
mcp__claude-flow__neural_train({
"pattern_type": "coordination",
"training_data": JSON.stringify(successfulWorkflows),
"epochs": 50
})
// Adaptive learning from experience
mcp__claude-flow__learning_adapt({
"experience": {
"workflow": "research-to-report",
"success": true,
"duration": 3600,
"quality": 0.95
}
})
// Pattern recognition for optimization
mcp__claude-flow__pattern_recognize({
"data": workflowMetrics,
"patterns": ["bottleneck", "optimization-opportunity", "efficiency-gain"]
})
```
### Workflow Automation
```javascript
// Create reusable workflow
mcp__claude-flow__workflow_create({
"name": "full-stack-development",
"steps": [
{ "phase": "design", "agents": ["architect"] },
{ "phase": "implement", "agents": ["backend-dev", "frontend-dev"], "parallel": true },
{ "phase": "test", "agents": ["tester", "security-tester"], "parallel": true },
{ "phase": "review", "agents": ["reviewer"] },
{ "phase": "deploy", "agents": ["devops"] }
],
"triggers": ["on-commit", "scheduled-daily"]
})
// Setup automation rules
mcp__claude-flow__automation_setup({
"rules": [
{
"trigger": "file-changed",
"pattern": "*.js",
"action": "run-tests"
},
{
"trigger": "PR-created",
"action": "code-review-swarm"
}
]
})
// Event-driven triggers
mcp__claude-flow__trigger_setup({
"events": ["code-commit", "PR-merge", "deployment"],
"actions": ["test", "analyze", "document"]
})
```
### Performance Optimization
```javascript
// Topology optimization
mcp__claude-flow__topology_optimize({
"swarmId": "current-swarm"
})
// Load balancing
mcp__claude-flow__load_balance({
"swarmId": "development-swarm",
"tasks": taskQueue
})
// Agent coordination sync
mcp__claude-flow__coordination_sync({
"swarmId": "development-swarm"
})
// Auto-scaling
mcp__claude-flow__swarm_scale({
"swarmId": "development-swarm",
"targetSize": 12
})
```
### Monitoring and Metrics
```javascript
// Real-time swarm monitoring
mcp__claude-flow__swarm_monitor({
"swarmId": "active-swarm",
"interval": 3000
})
// Collect comprehensive metrics
mcp__claude-flow__metrics_collect({
"components": ["agents", "tasks", "memory", "performance"]
})
// Health monitoring
mcp__claude-flow__health_check({
"components": ["swarm", "agents", "neural", "memory"]
})
// Usage statistics
mcp__claude-flow__usage_stats({
"component": "swarm-orchestration"
})
// Trend analysis
mcp__claude-flow__trend_analysis({
"metric": "agent-performance",
"period": "7d"
})
```
## Best Practices
### 1. Choosing the Right Topology
- **Mesh**: Research, brainstorming, collaborative analysis
- **Hierarchical**: Structured development, sequential workflows
- **Star**: Testing, validation, centralized coordination
- **Ring**: Pipeline processing, staged workflows
### 2. Agent Specialization
- Assign specific capabilities to each agent
- Avoid overlapping responsibilities
- Use coordination agents for complex workflows
- Leverage memory for agent communication
### 3. Parallel Execution
- Identify independent tasks for parallelization
- Use sequential execution for dependent tasks
- Monitor resource usage during parallel execution
- Implement proper error handling
### 4. Memory Management
- Use namespaces to organize memory
- Set appropriate TTL values
- Create regular backups
- Implement state snapshots for checkpoints
### 5. Monitoring and Optimization
- Monitor swarm health regularly
- Collect and analyze metrics
- Optimize topology based on performance
- Use neural patterns to learn from success
### 6. Error Recovery
- Implement fault tolerance strategies
- Use auto-recovery mechanisms
- Analyze error patterns
- Create fallback workflows
## Real-World Examples
### Example 1: AI Research Project
```javascript
// Research AI trends, analyze findings, generate report
mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 6 })
// Spawn: 2 researchers, 2 analysts, 1 synthesizer, 1 documenter
// Parallel gather → Analyze patterns → Synthesize → Report
```
### Example 2: Full-Stack Application
```javascript
// Build complete web application with testing
mcp__claude-flow__swarm_init({ topology: "hierarchical", maxAgents: 8 })
// Spawn: 1 architect, 2 devs, 1 db engineer, 2 testers, 1 reviewer, 1 devops
// Design → Parallel implement → Test → Review → Deploy
```
### Example 3: Security Audit
```javascript
// Comprehensive security analysis
mcp__claude-flow__swarm_init({ topology: "star", maxAgents: 5 })
// Spawn: 1 coordinator, 1 code analyzer, 1 security scanner, 1 penetration tester, 1 reporter
// Parallel scan → Vulnerability analysis → Penetration test → Report
```
### Example 4: Performance Optimization
```javascript
// Identify and fix performance bottlenecks
mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 4 })
// Spawn: 1 profiler, 1 bottleneck analyzer, 1 optimizer, 1 tester
// Profile → Identify bottlenecks → Optimize → Validate
```
## Troubleshooting
### Common Issues
**Issue**: Swarm agents not coordinating properly
**Solution**: Check topology selection, verify memory usage, enable monitoring
**Issue**: Parallel execution failing
**Solution**: Verify task dependencies, check resource limits, implement error handling
**Issue**: Memory persistence not working
**Solution**: Verify namespaces, check TTL settings, ensure backup configuration
**Issue**: Performance degradation
**Solution**: Optimize topology, reduce agent count, analyze bottlenecks
## Related Skills
- `sparc-methodology` - Systematic development workflow
- `github-integration` - Repository management and automation
- `neural-patterns` - AI-powered coordination optimization
- `memory-management` - Cross-session state persistence
## References
- [Codex Flow Documentation](https://github.com/ruvnet/Codex-flow)
- [Swarm Orchestration Guide](https://github.com/ruvnet/Codex-flow/wiki/swarm)
- [MCP Tools Reference](https://github.com/ruvnet/Codex-flow/wiki/mcp)
- [Performance Optimization](https://github.com/ruvnet/Codex-flow/wiki/performance)
---
**Version**: 2.0.0
**Last Updated**: 2025-10-19
**Skill Level**: Advanced
**Estimated Learning Time**: 2-3 hours

View File

@ -0,0 +1,872 @@
---
name: "V3 CLI Modernization"
description: "CLI modernization and hooks system enhancement for Codex-flow v3. Implements interactive prompts, command decomposition, enhanced hooks integration, and intelligent workflow automation."
---
# V3 CLI Modernization
## What This Skill Does
Modernizes Codex-flow v3 CLI with interactive prompts, intelligent command decomposition, enhanced hooks integration, performance optimization, and comprehensive workflow automation capabilities.
## Quick Start
```bash
# Initialize CLI modernization analysis
Task("CLI architecture", "Analyze current CLI structure and identify optimization opportunities", "cli-hooks-developer")
# Modernization implementation (parallel)
Task("Command decomposition", "Break down large CLI files into focused modules", "cli-hooks-developer")
Task("Interactive prompts", "Implement intelligent interactive CLI experience", "cli-hooks-developer")
Task("Hooks enhancement", "Deep integrate hooks with CLI lifecycle", "cli-hooks-developer")
```
## CLI Architecture Modernization
### Current State Analysis
```
Current CLI Issues:
├── index.ts: 108KB monolithic file
├── enterprise.ts: 68KB feature module
├── Limited interactivity: Basic command parsing
├── Hooks integration: Basic pre/post execution
└── No intelligent workflows: Manual command chaining
Target Architecture:
├── Modular Commands: <500 lines per command
├── Interactive Prompts: Smart context-aware UX
├── Enhanced Hooks: Deep lifecycle integration
├── Workflow Automation: Intelligent command orchestration
└── Performance: <200ms command response time
```
### Modular Command Architecture
```typescript
// src/cli/core/command-registry.ts
// Describes one pluggable CLI command: its identity, execution pipeline,
// and the metadata used for help output and access control.
interface CommandModule {
  name: string;                 // unique command name used for registry lookup
  description: string;          // one-line help text
  category: CommandCategory;    // grouping key for the category index
  handler: CommandHandler;      // function invoked when the command runs
  middleware: MiddlewareStack;  // pre/post execution pipeline the command runs through
  permissions: Permission[];    // permissions required to execute the command
  examples: CommandExample[];   // usage examples surfaced in help output
}
/**
 * Registry of modular CLI commands. Commands are indexed three ways:
 * by exact name, by category (for help output), and via an alias table.
 * Execution routes through the command's middleware stack.
 */
export class ModularCommandRegistry {
  private commands = new Map<string, CommandModule>();
  private categories = new Map<CommandCategory, CommandModule[]>();
  private aliases = new Map<string, string>();

  /** Adds a command and files it under its category bucket. */
  registerCommand(command: CommandModule): void {
    this.commands.set(command.name, command);
    const bucket = this.categories.get(command.category);
    if (bucket) {
      bucket.push(command);
    } else {
      this.categories.set(command.category, [command]);
    }
  }

  /**
   * Resolves `name` to a command and runs it through its middleware stack.
   * Throws CommandNotFoundError (with fuzzy suggestions) when unresolvable.
   */
  async executeCommand(name: string, args: string[]): Promise<CommandResult> {
    const command = this.resolveCommand(name);
    if (!command) {
      throw new CommandNotFoundError(name, this.getSuggestions(name));
    }
    const context = await this.buildExecutionContext(command, args);
    return command.middleware.execute(context);
  }

  /** Lookup order: exact name, then alias indirection, then fuzzy match. */
  private resolveCommand(name: string): CommandModule | undefined {
    const exact = this.commands.get(name);
    if (exact) {
      return exact;
    }
    const aliasTarget = this.aliases.get(name);
    if (aliasTarget) {
      return this.commands.get(aliasTarget);
    }
    return this.findFuzzyMatch(name);
  }
}
```
## Command Decomposition Strategy
### Swarm Commands Module
```typescript
// src/cli/commands/swarm/swarm.command.ts
@Command({
name: 'swarm',
description: 'Swarm coordination and management',
category: 'orchestration'
})
@Command({
  name: 'swarm',
  description: 'Swarm coordination and management',
  category: 'orchestration'
})
export class SwarmCommand {
  // CLI entry points for swarm lifecycle management.
  // NOTE(review): helpers referenced below (quickSwarmInit,
  // promptAgentConfiguration, handleAgentSpawn, handleTaskComplete,
  // handleSwarmComplete, displaySwarmStatus) are not shown in this
  // excerpt — presumably defined elsewhere in the class; verify.
  constructor(
    private swarmCoordinator: UnifiedSwarmCoordinator,
    private promptService: InteractivePromptService
  ) {}

  /**
   * `swarm init <projectName>` — creates a new swarm. With --interactive
   * the user is walked through topology/agent choices; otherwise the
   * option defaults are used directly.
   */
  @SubCommand('init')
  @Option('--topology', 'Swarm topology (mesh|hierarchical|adaptive)', 'hierarchical')
  @Option('--agents', 'Number of agents to spawn', 5)
  @Option('--interactive', 'Interactive agent configuration', false)
  async init(
    @Arg('projectName') projectName: string,
    options: SwarmInitOptions
  ): Promise<CommandResult> {
    if (options.interactive) {
      return this.interactiveSwarmInit(projectName);
    }
    return this.quickSwarmInit(projectName, options);
  }

  /**
   * Interactive flow: prompt for topology and agent configuration, then
   * initialize the swarm with lifecycle hooks bound to this command.
   */
  private async interactiveSwarmInit(projectName: string): Promise<CommandResult> {
    console.log(`🚀 Initializing Swarm for ${projectName}`);
    // Interactive topology selection
    const topology = await this.promptService.select({
      message: 'Select swarm topology:',
      choices: [
        { name: 'Hierarchical (Queen-led coordination)', value: 'hierarchical' },
        { name: 'Mesh (Peer-to-peer collaboration)', value: 'mesh' },
        { name: 'Adaptive (Dynamic topology switching)', value: 'adaptive' }
      ]
    });
    // Agent configuration
    const agents = await this.promptAgentConfiguration();
    // Initialize with configuration; hooks are bound so this command
    // reacts to spawn/completion events for its own swarm only.
    const swarm = await this.swarmCoordinator.initialize({
      name: projectName,
      topology,
      agents,
      hooks: {
        onAgentSpawn: this.handleAgentSpawn.bind(this),
        onTaskComplete: this.handleTaskComplete.bind(this),
        onSwarmComplete: this.handleSwarmComplete.bind(this)
      }
    });
    return CommandResult.success({
      message: `✅ Swarm ${projectName} initialized with ${agents.length} agents`,
      data: { swarmId: swarm.id, topology, agentCount: agents.length }
    });
  }

  /**
   * `swarm status` — shows state for an active swarm; prompts for a
   * selection only when more than one swarm is running.
   */
  @SubCommand('status')
  async status(): Promise<CommandResult> {
    const swarms = await this.swarmCoordinator.listActiveSwarms();
    if (swarms.length === 0) {
      return CommandResult.info('No active swarms found');
    }
    // Interactive swarm selection if multiple
    const selectedSwarm = swarms.length === 1
      ? swarms[0]
      : await this.promptService.select({
          message: 'Select swarm to inspect:',
          choices: swarms.map(s => ({
            name: `${s.name} (${s.agents.length} agents, ${s.topology})`,
            value: s
          }))
        });
    return this.displaySwarmStatus(selectedSwarm);
  }
}
```
### Learning Commands Module
```typescript
// src/cli/commands/learning/learning.command.ts
@Command({
name: 'learning',
description: 'Learning system management and optimization',
category: 'intelligence'
})
@Command({
  name: 'learning',
  description: 'Learning system management and optimization',
  category: 'intelligence'
})
export class LearningCommand {
  // CLI entry points for the reinforcement-learning subsystem.
  // NOTE(review): helpers referenced below (analyzeCurrentContext,
  // getCurrentUser, displayInteractiveMetrics) are not shown in this
  // excerpt — presumably defined elsewhere in the class; verify.
  constructor(
    private learningService: IntegratedLearningService,
    private promptService: InteractivePromptService
  ) {}

  /**
   * `learning start` — opens a learning session. When --algorithm is
   * 'auto' (the default), the current task context is analyzed and the
   * service picks the algorithm.
   */
  @SubCommand('start')
  @Option('--algorithm', 'RL algorithm to use', 'auto')
  @Option('--tier', 'Learning tier (basic|standard|advanced)', 'standard')
  async start(options: LearningStartOptions): Promise<CommandResult> {
    // Auto-detect optimal algorithm if not specified
    if (options.algorithm === 'auto') {
      const taskContext = await this.analyzeCurrentContext();
      options.algorithm = this.learningService.selectOptimalAlgorithm(taskContext);
      console.log(`🧠 Auto-selected ${options.algorithm} algorithm based on context`);
    }
    const session = await this.learningService.startSession({
      algorithm: options.algorithm,
      tier: options.tier,
      userId: await this.getCurrentUser()
    });
    return CommandResult.success({
      message: `🚀 Learning session started with ${options.algorithm}`,
      data: { sessionId: session.id, algorithm: options.algorithm, tier: options.tier }
    });
  }

  /**
   * `learning feedback <reward>` — records a reward (expected range 0-1;
   * not validated here) against the currently active session. Errors out
   * when no session is active.
   */
  @SubCommand('feedback')
  @Arg('reward', 'Reward value (0-1)', 'number')
  async feedback(
    @Arg('reward') reward: number,
    @Option('--context', 'Additional context for learning')
    context?: string
  ): Promise<CommandResult> {
    const activeSession = await this.learningService.getActiveSession();
    if (!activeSession) {
      return CommandResult.error('No active learning session found. Start one with `learning start`');
    }
    await this.learningService.submitFeedback({
      sessionId: activeSession.id,
      reward,
      context,
      timestamp: new Date()
    });
    return CommandResult.success({
      message: `📊 Feedback recorded (reward: ${reward})`,
      data: { reward, sessionId: activeSession.id }
    });
  }

  /** `learning metrics` — fetches and interactively renders metrics. */
  @SubCommand('metrics')
  async metrics(): Promise<CommandResult> {
    const metrics = await this.learningService.getMetrics();
    // Interactive metrics display
    await this.displayInteractiveMetrics(metrics);
    return CommandResult.success('Metrics displayed');
  }
}
```
## Interactive Prompt System
### Advanced Prompt Service
```typescript
// src/cli/services/interactive-prompt.service.ts
// Generic shape of a prompt request.
// NOTE(review): the service methods below accept specialized option types
// (SelectPromptOptions, InputPromptOptions, ...) rather than this union
// shape directly — presumably they extend this interface; verify.
interface PromptOptions {
  message: string;                              // text shown to the user
  type: 'select' | 'multiselect' | 'input' | 'confirm' | 'progress';
  choices?: PromptChoice[];                     // options for select/multiselect
  default?: any;                                // pre-selected/pre-filled value
  validate?: (input: any) => boolean | string;  // true = ok, string = error message
  transform?: (input: any) => any;              // post-processing of raw input
}
/**
 * Interactive CLI prompt helpers. UI libraries (inquirer, cli-progress)
 * are imported dynamically inside each method so they stay out of the
 * startup bundle (tree-shaking friendly).
 *
 * BUG FIX: the original class invoked `this.confirm(...)` from
 * confirmWithDetails() without ever defining a confirm() method — it is
 * added below. The unused `private inquirer` field was removed.
 */
export class InteractivePromptService {
  /** Single-choice list prompt; resolves with the selected value. */
  async select<T>(options: SelectPromptOptions<T>): Promise<T> {
    const { default: inquirer } = await import('inquirer');
    const result = await inquirer.prompt([{
      type: 'list',
      name: 'selection',
      message: options.message,
      choices: options.choices,
      default: options.default
    }]);
    return result.selection;
  }

  /**
   * Checkbox prompt; resolves with the selected values. Enforces optional
   * min/max selection counts via inquirer's validate callback.
   */
  async multiSelect<T>(options: MultiSelectPromptOptions<T>): Promise<T[]> {
    const { default: inquirer } = await import('inquirer');
    const result = await inquirer.prompt([{
      type: 'checkbox',
      name: 'selections',
      message: options.message,
      choices: options.choices,
      validate: (input: T[]) => {
        if (options.minSelections && input.length < options.minSelections) {
          return `Please select at least ${options.minSelections} options`;
        }
        if (options.maxSelections && input.length > options.maxSelections) {
          return `Please select at most ${options.maxSelections} options`;
        }
        return true;
      }
    }]);
    return result.selections;
  }

  /** Free-text prompt with optional validation and live transform. */
  async input(options: InputPromptOptions): Promise<string> {
    const { default: inquirer } = await import('inquirer');
    const result = await inquirer.prompt([{
      type: 'input',
      name: 'input',
      message: options.message,
      default: options.default,
      validate: options.validate,
      transformer: options.transform
    }]);
    return result.input;
  }

  /**
   * Yes/no confirmation prompt. (Added — see class doc.)
   *
   * @param message      question shown to the user
   * @param defaultValue answer used when the user just presses Enter
   */
  async confirm(message: string, defaultValue: boolean = true): Promise<boolean> {
    const { default: inquirer } = await import('inquirer');
    const result = await inquirer.prompt([{
      type: 'confirm',
      name: 'confirmed',
      message,
      default: defaultValue
    }]);
    return result.confirmed;
  }

  /**
   * Runs `task` while rendering a progress bar; the task drives the bar
   * via the injected updateProgress callback. The bar is always stopped,
   * even when the task throws.
   */
  async progressTask<T>(
    task: ProgressTask<T>,
    options: ProgressOptions
  ): Promise<T> {
    const { default: cliProgress } = await import('cli-progress');
    const progressBar = new cliProgress.SingleBar({
      format: `${options.title} |{bar}| {percentage}% | {status}`,
      barCompleteChar: '█',
      barIncompleteChar: '░',
      hideCursor: true
    });
    progressBar.start(100, 0, { status: 'Starting...' });
    try {
      const result = await task({
        updateProgress: (percent: number, status?: string) => {
          progressBar.update(percent, { status: status || 'Processing...' });
        }
      });
      progressBar.update(100, { status: 'Complete!' });
      progressBar.stop();
      return result;
    } catch (error) {
      // Stop the bar before re-raising so the terminal is left clean.
      progressBar.stop();
      throw error;
    }
  }

  /**
   * Prints a message plus a key/value detail table, then asks the user
   * to confirm.
   * NOTE(review): `chalk` is used here but not imported in this excerpt —
   * assumed to be a module-scope import; verify.
   */
  async confirmWithDetails(
    message: string,
    details: ConfirmationDetails
  ): Promise<boolean> {
    console.log('\n' + chalk.bold(message));
    console.log(chalk.gray('Details:'));
    for (const [key, value] of Object.entries(details)) {
      console.log(chalk.gray(`  ${key}: ${value}`));
    }
    return this.confirm('\nProceed?');
  }
}
```
## Enhanced Hooks Integration
### Deep CLI Hooks Integration
```typescript
// src/cli/hooks/cli-hooks-manager.ts
// Payload passed to every CLI lifecycle hook handler.
interface CLIHookEvent {
  type: 'command_start' | 'command_end' | 'command_error' | 'agent_spawn' | 'task_complete';
  command: string;            // command name that triggered the event
  args: string[];             // raw arguments the command received
  context: ExecutionContext;  // per-invocation execution context (session ids, results, errors)
  timestamp: Date;            // when the event was emitted
}
/**
 * Registers and dispatches CLI lifecycle hooks (command start/end/error,
 * agent spawn, task complete) and wires the default learning,
 * suggestion, and performance hooks.
 *
 * BUG FIX: the original class called registerHook(), executeHookSafely(),
 * displaySuggestions(), and recordPerformanceMetrics() without defining
 * any of them — minimal implementations are added below.
 */
export class CLIHooksManager {
  private hooks: Map<string, HookHandler[]> = new Map();
  private learningIntegration: LearningHooksIntegration;

  constructor() {
    // NOTE(review): LearningHooksIntegration as defined elsewhere in this
    // document takes two client dependencies; this zero-arg construction
    // is inconsistent — verify the intended wiring.
    this.learningIntegration = new LearningHooksIntegration();
    this.setupDefaultHooks();
  }

  /** Appends a handler to the list for the given event type. (Added.) */
  registerHook(type: string, handler: HookHandler): void {
    const handlers = this.hooks.get(type) ?? [];
    handlers.push(handler);
    this.hooks.set(type, handlers);
  }

  /** Installs the built-in learning, suggestion, and perf hooks. */
  private setupDefaultHooks(): void {
    // Learning integration hooks
    this.registerHook('command_start', async (event: CLIHookEvent) => {
      await this.learningIntegration.recordCommandStart(event);
    });
    this.registerHook('command_end', async (event: CLIHookEvent) => {
      await this.learningIntegration.recordCommandSuccess(event);
    });
    this.registerHook('command_error', async (event: CLIHookEvent) => {
      await this.learningIntegration.recordCommandError(event);
    });
    // Intelligent suggestions
    this.registerHook('command_start', async (event: CLIHookEvent) => {
      const suggestions = await this.generateIntelligentSuggestions(event);
      if (suggestions.length > 0) {
        this.displaySuggestions(suggestions);
      }
    });
    // Performance monitoring
    this.registerHook('command_end', async (event: CLIHookEvent) => {
      await this.recordPerformanceMetrics(event);
    });
  }

  /** Runs all handlers for `type` concurrently; failures are isolated. */
  async executeHooks(type: string, event: CLIHookEvent): Promise<void> {
    const handlers = this.hooks.get(type) || [];
    await Promise.all(handlers.map(handler =>
      this.executeHookSafely(handler, event)
    ));
  }

  /** Awaits one handler, logging (not propagating) its errors. (Added.) */
  private async executeHookSafely(handler: HookHandler, event: CLIHookEvent): Promise<void> {
    try {
      await handler(event);
    } catch (error) {
      console.error(`Hook for '${event.type}' failed:`, error);
    }
  }

  /**
   * Surfaces optimization hints learned from similar past executions.
   * NOTE(review): getExecutionContext/findSimilarPatterns are not shown
   * on LearningHooksIntegration in this excerpt — verify they exist.
   */
  private async generateIntelligentSuggestions(event: CLIHookEvent): Promise<Suggestion[]> {
    const context = await this.learningIntegration.getExecutionContext(event);
    const patterns = await this.learningIntegration.findSimilarPatterns(context);
    return patterns.map(pattern => ({
      type: 'optimization',
      message: `Based on similar executions, consider: ${pattern.suggestion}`,
      confidence: pattern.confidence
    }));
  }

  /** Minimal console rendering of suggestions. (Added.) */
  private displaySuggestions(suggestions: Suggestion[]): void {
    for (const suggestion of suggestions) {
      console.log(`💡 ${suggestion.message} (confidence: ${suggestion.confidence})`);
    }
  }

  /** Minimal perf capture: duration derived from the event timestamp. (Added.) */
  private async recordPerformanceMetrics(event: CLIHookEvent): Promise<void> {
    const durationMs = Date.now() - event.timestamp.getTime();
    console.debug(`[perf] ${event.command} completed in ${durationMs}ms`);
  }
}
```
### Learning Integration
```typescript
// src/cli/hooks/learning-hooks-integration.ts
/**
 * Bridges CLI hook events into the learning subsystems: trajectory
 * tracking via agentic-flow hooks and experience/feedback recording via
 * AgentDB. Rewards are shaped from success, latency, and complexity.
 *
 * NOTE(review): encodeCommandState, getExpectedExecutionTime, and
 * calculateCommandComplexity are referenced but not shown in this
 * excerpt — presumably defined elsewhere in the class; verify.
 */
export class LearningHooksIntegration {
  constructor(
    private agenticFlowHooks: AgenticFlowHooksClient,
    private agentDBLearning: AgentDBLearningClient
  ) {}

  /** On command start: open a trajectory and log the raw experience. */
  async recordCommandStart(event: CLIHookEvent): Promise<void> {
    // Start trajectory tracking
    await this.agenticFlowHooks.trajectoryStart({
      sessionId: event.context.sessionId,
      command: event.command,
      args: event.args,
      context: event.context
    });
    // Record experience in AgentDB
    await this.agentDBLearning.recordExperience({
      type: 'command_execution',
      state: this.encodeCommandState(event),
      action: event.command,
      timestamp: event.timestamp
    });
  }

  /**
   * On success: close the trajectory with a shaped reward, feed the
   * learning session, and persist high-reward (>0.8) patterns for reuse.
   */
  async recordCommandSuccess(event: CLIHookEvent): Promise<void> {
    // Elapsed wall time since the start event carried in the payload.
    const executionTime = Date.now() - event.timestamp.getTime();
    const reward = this.calculateReward(event, executionTime, true);
    // Complete trajectory
    await this.agenticFlowHooks.trajectoryEnd({
      sessionId: event.context.sessionId,
      success: true,
      reward,
      verdict: 'positive'
    });
    // Submit feedback to learning system
    await this.agentDBLearning.submitFeedback({
      sessionId: event.context.learningSessionId,
      reward,
      success: true,
      latencyMs: executionTime
    });
    // Store successful pattern
    if (reward > 0.8) {
      await this.agenticFlowHooks.storePattern({
        pattern: event.command,
        solution: event.context.result,
        confidence: reward
      });
    }
  }

  /** On failure: close the trajectory negatively and record the error. */
  async recordCommandError(event: CLIHookEvent): Promise<void> {
    const executionTime = Date.now() - event.timestamp.getTime();
    const reward = this.calculateReward(event, executionTime, false);
    // Complete trajectory with error
    await this.agenticFlowHooks.trajectoryEnd({
      sessionId: event.context.sessionId,
      success: false,
      reward,
      verdict: 'negative',
      error: event.context.error
    });
    // Learn from failure
    await this.agentDBLearning.submitFeedback({
      sessionId: event.context.learningSessionId,
      reward,
      success: false,
      latencyMs: executionTime,
      error: event.context.error
    });
  }

  /**
   * Reward shaping: failures score 0; successes start at 0.5, earn up to
   * +0.3 for beating the expected latency and up to +0.2 for command
   * complexity, clamped to 1.0.
   */
  private calculateReward(event: CLIHookEvent, executionTime: number, success: boolean): number {
    if (!success) return 0;
    // Base reward for success
    let reward = 0.5;
    // Performance bonus (faster execution)
    const expectedTime = this.getExpectedExecutionTime(event.command);
    if (executionTime < expectedTime) {
      reward += 0.3 * (1 - executionTime / expectedTime);
    }
    // Complexity bonus
    const complexity = this.calculateCommandComplexity(event);
    reward += complexity * 0.2;
    return Math.min(reward, 1.0);
  }
}
```
## Intelligent Workflow Automation
### Workflow Orchestrator
```typescript
// src/cli/workflows/workflow-orchestrator.ts
// One node in a workflow DAG: the command to run, what it depends on,
// and optional gating/retry behavior.
interface WorkflowStep {
  id: string;                    // unique step identifier, used by dependsOn
  command: string;               // registry command name to execute
  args: string[];                // arguments passed to the command
  dependsOn: string[];           // step ids that must complete first
  condition?: WorkflowCondition; // optional gate; step is skipped when false
  retryPolicy?: RetryPolicy;     // optional retry/backoff settings (default: 1 attempt)
}
/**
 * Executes multi-step command workflows: confirms with the user, orders
 * steps by dependencies, runs each through the command registry with
 * retries, and can synthesize workflows from a natural-language intent.
 *
 * NOTE(review): sortStepsByDependencies, displayWorkflowOverview,
 * findWorkflowPatterns, customizeWorkflow, evaluateCondition, and delay
 * are referenced but not shown in this excerpt — verify they exist.
 */
export class WorkflowOrchestrator {
  constructor(
    private commandRegistry: ModularCommandRegistry,
    private promptService: InteractivePromptService
  ) {}

  /**
   * Runs `workflow` end-to-end: overview + confirmation, then step
   * execution under a progress bar. Returns cancelled() if declined.
   */
  async executeWorkflow(workflow: Workflow): Promise<WorkflowResult> {
    const context = new WorkflowExecutionContext(workflow);
    // Display workflow overview
    await this.displayWorkflowOverview(workflow);
    const confirmed = await this.promptService.confirm(
      'Execute this workflow?'
    );
    if (!confirmed) {
      return WorkflowResult.cancelled();
    }
    // Execute steps sequentially in dependency order, reporting progress.
    return this.promptService.progressTask(
      async ({ updateProgress }) => {
        const steps = this.sortStepsByDependencies(workflow.steps);
        for (let i = 0; i < steps.length; i++) {
          const step = steps[i];
          updateProgress((i / steps.length) * 100, `Executing ${step.command}`);
          await this.executeStep(step, context);
        }
        return WorkflowResult.success(context.getResults());
      },
      { title: `Workflow: ${workflow.name}` }
    );
  }

  /**
   * Builds a workflow from a free-text intent by matching against learned
   * workflow patterns; prompts the user to pick when several match.
   * Throws when no pattern matches.
   */
  async generateWorkflowFromIntent(intent: string): Promise<Workflow> {
    // Use learning system to generate workflow
    const patterns = await this.findWorkflowPatterns(intent);
    if (patterns.length === 0) {
      throw new Error('Could not generate workflow for intent');
    }
    // Select best pattern or let user choose
    const selectedPattern = patterns.length === 1
      ? patterns[0]
      : await this.promptService.select({
          message: 'Select workflow template:',
          choices: patterns.map(p => ({
            name: `${p.name} (${p.confidence}% match)`,
            value: p
          }))
        });
    return this.customizeWorkflow(selectedPattern, intent);
  }

  /**
   * Runs one step: skips it when its condition is false, fails fast on
   * unmet dependencies, and retries per the step's retryPolicy (default
   * one attempt) with fixed backoff between attempts.
   */
  private async executeStep(step: WorkflowStep, context: WorkflowExecutionContext): Promise<void> {
    // Check conditions
    if (step.condition && !this.evaluateCondition(step.condition, context)) {
      context.skipStep(step.id, 'Condition not met');
      return;
    }
    // Check dependencies
    const missingDeps = step.dependsOn.filter(dep => !context.isStepCompleted(dep));
    if (missingDeps.length > 0) {
      throw new WorkflowError(`Step ${step.id} has unmet dependencies: ${missingDeps.join(', ')}`);
    }
    // Execute with retry policy
    const retryPolicy = step.retryPolicy || { maxAttempts: 1 };
    let lastError: Error | null = null;
    for (let attempt = 1; attempt <= retryPolicy.maxAttempts; attempt++) {
      try {
        const result = await this.commandRegistry.executeCommand(step.command, step.args);
        context.completeStep(step.id, result);
        return;
      } catch (error) {
        lastError = error as Error;
        // Back off before the next attempt (fixed delay, default 1s).
        if (attempt < retryPolicy.maxAttempts) {
          await this.delay(retryPolicy.backoffMs || 1000);
        }
      }
    }
    throw new WorkflowError(`Step ${step.id} failed after ${retryPolicy.maxAttempts} attempts: ${lastError?.message}`);
  }
}
```
## Performance Optimization
### Command Performance Monitoring
```typescript
// src/cli/performance/command-performance.ts
/**
 * Wraps command execution to record latency and heap-memory deltas per
 * command, warns when a command's P95 latency degrades, and produces
 * per-command performance reports.
 *
 * NOTE(review): CommandMetrics and generateRecommendations are not shown
 * in this excerpt — verify their definitions.
 */
export class CommandPerformanceMonitor {
  private metrics = new Map<string, CommandMetrics>();

  /**
   * Runs `executor`, recording wall time and heap delta on success, or
   * wall time with the error on failure (the error is re-thrown).
   * NOTE(review): the failure path records memoryDelta as 0 rather than
   * the measured delta — confirm this is intentional.
   */
  async measureCommand<T>(
    commandName: string,
    executor: () => Promise<T>
  ): Promise<T> {
    const start = performance.now();
    const memBefore = process.memoryUsage();
    try {
      const result = await executor();
      const end = performance.now();
      const memAfter = process.memoryUsage();
      this.recordMetrics(commandName, {
        executionTime: end - start,
        memoryDelta: memAfter.heapUsed - memBefore.heapUsed,
        success: true
      });
      return result;
    } catch (error) {
      const end = performance.now();
      this.recordMetrics(commandName, {
        executionTime: end - start,
        memoryDelta: 0,
        success: false,
        error: error as Error
      });
      throw error;
    }
  }

  /** Lazily creates per-command metrics and warns on slow P95 (>5s). */
  private recordMetrics(command: string, measurement: PerformanceMeasurement): void {
    if (!this.metrics.has(command)) {
      this.metrics.set(command, new CommandMetrics(command));
    }
    const metrics = this.metrics.get(command)!;
    metrics.addMeasurement(measurement);
    // Alert if performance degrades
    if (metrics.getP95ExecutionTime() > 5000) { // 5 seconds
      console.warn(`⚠️ Command '${command}' is performing slowly (P95: ${metrics.getP95ExecutionTime()}ms)`);
    }
  }

  /**
   * Builds an aggregate report for one command; throws if the command has
   * never been measured.
   */
  getCommandReport(command: string): PerformanceReport {
    const metrics = this.metrics.get(command);
    if (!metrics) {
      throw new Error(`No metrics found for command: ${command}`);
    }
    return {
      command,
      totalExecutions: metrics.getTotalExecutions(),
      successRate: metrics.getSuccessRate(),
      avgExecutionTime: metrics.getAverageExecutionTime(),
      p95ExecutionTime: metrics.getP95ExecutionTime(),
      avgMemoryUsage: metrics.getAverageMemoryUsage(),
      recommendations: this.generateRecommendations(metrics)
    };
  }
}
```
## Smart Auto-completion
### Intelligent Command Completion
```typescript
// src/cli/completion/intelligent-completion.ts
/**
 * Produces ranked CLI auto-completion candidates by merging three
 * sources: exact registry prefix matches, suggestions learned from past
 * usage, and environment-derived workflow hints.
 */
export class IntelligentCompletion {
  constructor(
    private learningService: LearningService,
    private commandRegistry: ModularCommandRegistry
  ) {}

  /** Returns at most ten candidates, highest confidence first. */
  async generateCompletions(
    partial: string,
    context: CompletionContext
  ): Promise<Completion[]> {
    // Exact command-name prefix matches always carry full confidence.
    const exact: Completion[] = this.commandRegistry
      .findCommandsByPrefix(partial)
      .map(cmd => ({
        value: cmd.name,
        description: cmd.description,
        type: 'command',
        confidence: 1.0
      }));

    // History-driven suggestions from the learning service.
    const learned = await this.learningService.suggestCommands(
      partial,
      context
    );

    // Hints derived from the surrounding project environment.
    const contextual = await this.generateContextualSuggestions(
      partial,
      context
    );

    const ranked = [...exact, ...learned, ...contextual];
    ranked.sort((a, b) => b.confidence - a.confidence);
    return ranked.slice(0, 10);
  }

  /** Environment-aware hints (git repository, Node project, ...). */
  private async generateContextualSuggestions(
    partial: string,
    context: CompletionContext
  ): Promise<Completion[]> {
    const suggestions: Completion[] = [];

    // Inside a git repository, a partial "git..." likely wants a commit.
    if (context.isGitRepository && partial.startsWith('git')) {
      suggestions.push({
        value: 'git commit',
        description: 'Create git commit with generated message',
        type: 'workflow',
        confidence: 0.8
      });
    }

    // A package.json marks a Node project that can host a swarm.
    if (context.hasPackageJson && (partial.startsWith('npm') || partial.startsWith('swarm'))) {
      suggestions.push({
        value: 'swarm init',
        description: 'Initialize swarm for this project',
        type: 'workflow',
        confidence: 0.9
      });
    }

    return suggestions;
  }
}
```
## Success Metrics
### CLI Performance Targets
- [ ] **Command Response**: <200ms average command execution time
- [ ] **File Decomposition**: index.ts (108KB) → <10KB per command module
- [ ] **Interactive UX**: Smart prompts with context awareness
- [ ] **Hook Integration**: Deep lifecycle integration with learning
- [ ] **Workflow Automation**: Intelligent multi-step command orchestration
- [ ] **Auto-completion**: >90% accuracy for command suggestions
### User Experience Improvements
```typescript
// Before/after summary of the CLI modernization targets (documentation
// data only; values are the goals stated in this skill, not measurements).
const cliImprovements = {
  before: {
    commandResponse: '~500ms',                      // legacy average latency
    interactivity: 'Basic command parsing',
    workflows: 'Manual command chaining',
    suggestions: 'Static help text'
  },
  after: {
    commandResponse: '<200ms with caching',         // target latency
    interactivity: 'Smart context-aware prompts',
    workflows: 'Automated multi-step execution',
    suggestions: 'Learning-based intelligent completion'
  }
};
```
## Related V3 Skills
- `v3-core-implementation` - Core domain integration
- `v3-memory-unification` - Memory-backed command caching
- `v3-swarm-coordination` - CLI swarm management integration
- `v3-performance-optimization` - CLI performance monitoring
## Usage Examples
### Complete CLI Modernization
```bash
# Full CLI modernization implementation
Task("CLI modernization implementation",
"Implement modular commands, interactive prompts, and intelligent workflows",
"cli-hooks-developer")
```
### Interactive Command Enhancement
```bash
# Enhanced interactive commands
Codex-flow swarm init --interactive
Codex-flow learning start --guided
Codex-flow workflow create --from-intent "setup new project"
```

View File

@ -0,0 +1,797 @@
---
name: "V3 Core Implementation"
description: "Core module implementation for Codex-flow v3. Implements DDD domains, clean architecture patterns, dependency injection, and modular TypeScript codebase with comprehensive testing."
---
# V3 Core Implementation
## What This Skill Does
Implements the core TypeScript modules for Codex-flow v3 following Domain-Driven Design principles, clean architecture patterns, and modern TypeScript best practices with comprehensive test coverage.
## Quick Start
```bash
# Initialize core implementation
Task("Core foundation", "Set up DDD domain structure and base classes", "core-implementer")
# Domain implementation (parallel)
Task("Task domain", "Implement task management domain with entities and services", "core-implementer")
Task("Session domain", "Implement session management domain", "core-implementer")
Task("Health domain", "Implement health monitoring domain", "core-implementer")
```
## Core Implementation Architecture
### Domain Structure
```
src/
├── core/
│ ├── kernel/ # Microkernel pattern
│ │ ├── Codex-flow-kernel.ts
│ │ ├── domain-registry.ts
│ │ └── plugin-loader.ts
│ │
│ ├── domains/ # DDD Bounded Contexts
│ │ ├── task-management/
│ │ │ ├── entities/
│ │ │ ├── value-objects/
│ │ │ ├── services/
│ │ │ ├── repositories/
│ │ │ └── events/
│ │ │
│ │ ├── session-management/
│ │ ├── health-monitoring/
│ │ ├── lifecycle-management/
│ │ └── event-coordination/
│ │
│ ├── shared/ # Shared kernel
│ │ ├── domain/
│ │ │ ├── entity.ts
│ │ │ ├── value-object.ts
│ │ │ ├── domain-event.ts
│ │ │ └── aggregate-root.ts
│ │ │
│ │ ├── infrastructure/
│ │ │ ├── event-bus.ts
│ │ │ ├── dependency-container.ts
│ │ │ └── logger.ts
│ │ │
│ │ └── types/
│ │ ├── common.ts
│ │ ├── errors.ts
│ │ └── interfaces.ts
│ │
│ └── application/ # Application services
│ ├── use-cases/
│ ├── commands/
│ ├── queries/
│ └── handlers/
```
## Base Domain Classes
### Entity Base Class
```typescript
// src/core/shared/domain/entity.ts
/**
 * Base class for DDD entities. Identity is the immutable `_id`; equality
 * is identity-based. Domain events raised by subclasses are buffered
 * until a repository commits and clears them.
 */
export abstract class Entity<T> {
  protected readonly _id: T;
  private _domainEvents: DomainEvent[] = [];

  constructor(id: T) {
    this._id = id;
  }

  get id(): T {
    return this._id;
  }

  /** True when `object` is the same reference or an Entity with the same id. */
  public equals(object?: Entity<T>): boolean {
    if (object == null) {
      // Loose == covers both null and undefined.
      return false;
    }
    if (this === object) {
      return true;
    }
    return object instanceof Entity && this._id === object._id;
  }

  /** Buffers a domain event for later publication. */
  protected addDomainEvent(domainEvent: DomainEvent): void {
    this._domainEvents.push(domainEvent);
  }

  /** Events raised since the last commit. */
  public getUncommittedEvents(): DomainEvent[] {
    return this._domainEvents;
  }

  /** Clears the event buffer after the events have been published. */
  public markEventsAsCommitted(): void {
    this._domainEvents = [];
  }
}
```
### Value Object Base Class
```typescript
// src/core/shared/domain/value-object.ts
/**
 * Base class for DDD value objects: immutable (frozen) props with
 * structural equality.
 */
export abstract class ValueObject<T> {
  // Frozen on construction; note Object.freeze is shallow, so nested
  // objects inside props remain mutable.
  protected readonly props: T;

  constructor(props: T) {
    this.props = Object.freeze(props);
  }

  /**
   * Structural equality via JSON serialization.
   * NOTE(review): JSON.stringify is key-order sensitive, so two objects
   * with the same fields in different insertion order compare unequal —
   * confirm this is acceptable for all subclasses.
   */
  public equals(object?: ValueObject<T>): boolean {
    if (object == null || object == undefined) {
      return false;
    }
    if (this === object) {
      return true;
    }
    return JSON.stringify(this.props) === JSON.stringify(object.props);
  }

  /** The wrapped (frozen) value. */
  get value(): T {
    return this.props;
  }
}
```
### Aggregate Root
```typescript
// src/core/shared/domain/aggregate-root.ts
/**
 * Base class for DDD aggregate roots: an Entity that additionally tracks
 * an optimistic-concurrency version, bumped once per applied event.
 */
export abstract class AggregateRoot<T> extends Entity<T> {
  private _version = 0;

  /** Number of events applied since creation. */
  get version(): number {
    return this._version;
  }

  protected incrementVersion(): void {
    this._version += 1;
  }

  /** Buffers `event` on the entity and bumps the aggregate version. */
  public applyEvent(event: DomainEvent): void {
    this.addDomainEvent(event);
    this.incrementVersion();
  }
}
```
## Task Management Domain Implementation
### Task Entity
```typescript
// src/core/domains/task-management/entities/task.entity.ts
import { AggregateRoot } from '../../../shared/domain/aggregate-root';
import { TaskId } from '../value-objects/task-id.vo';
import { TaskStatus } from '../value-objects/task-status.vo';
import { Priority } from '../value-objects/priority.vo';
import { TaskAssignedEvent } from '../events/task-assigned.event';
// Internal state of the Task aggregate (held privately by Task).
interface TaskProps {
  id: TaskId;               // aggregate identity
  description: string;      // human-readable task description
  priority: Priority;       // scheduling priority
  status: TaskStatus;       // lifecycle state
  assignedAgentId?: string; // set once the task is assigned
  createdAt: Date;          // creation timestamp
  updatedAt: Date;          // last state-change timestamp
}
/**
 * Task aggregate root: lifecycle pending → assigned → completed, raising
 * domain events on each transition.
 *
 * NOTE(review): TaskResult and TaskCompletedEvent are referenced below
 * but are not in the import list shown above this class — verify they
 * are imported in the real file.
 */
export class Task extends AggregateRoot<TaskId> {
  private props: TaskProps;

  // Private: instances are created via create() or reconstitute().
  private constructor(props: TaskProps) {
    super(props.id);
    this.props = props;
  }

  /** Factory for a brand-new pending task with a fresh TaskId. */
  static create(description: string, priority: Priority): Task {
    const task = new Task({
      id: TaskId.create(),
      description,
      priority,
      status: TaskStatus.pending(),
      createdAt: new Date(),
      updatedAt: new Date()
    });
    return task;
  }

  /** Rebuilds a task from persisted state without raising events. */
  static reconstitute(props: TaskProps): Task {
    return new Task(props);
  }

  /**
   * Assigns the task to an agent and raises TaskAssignedEvent.
   * Throws when the task is already completed.
   */
  public assignTo(agentId: string): void {
    if (this.props.status.equals(TaskStatus.completed())) {
      throw new Error('Cannot assign completed task');
    }
    this.props.assignedAgentId = agentId;
    this.props.status = TaskStatus.assigned();
    this.props.updatedAt = new Date();
    this.applyEvent(new TaskAssignedEvent(
      this.id.value,
      agentId,
      this.props.priority
    ));
  }

  /**
   * Marks the task completed and raises TaskCompletedEvent with the
   * elapsed duration. Throws when the task was never assigned.
   */
  public complete(result: TaskResult): void {
    if (!this.props.assignedAgentId) {
      throw new Error('Cannot complete unassigned task');
    }
    this.props.status = TaskStatus.completed();
    this.props.updatedAt = new Date();
    this.applyEvent(new TaskCompletedEvent(
      this.id.value,
      result,
      this.calculateDuration()
    ));
  }

  // Getters
  get description(): string { return this.props.description; }
  get priority(): Priority { return this.props.priority; }
  get status(): TaskStatus { return this.props.status; }
  get assignedAgentId(): string | undefined { return this.props.assignedAgentId; }
  get createdAt(): Date { return this.props.createdAt; }
  get updatedAt(): Date { return this.props.updatedAt; }

  // Milliseconds between creation and the last update (completion time).
  private calculateDuration(): number {
    return this.props.updatedAt.getTime() - this.props.createdAt.getTime();
  }
}
```
### Task Value Objects
```typescript
// src/core/domains/task-management/value-objects/task-id.vo.ts
/** Identity value object for Task, backed by a UUID string. */
export class TaskId extends ValueObject<string> {
  private constructor(value: string) {
    // BUG FIX: the original called super({ value }) and read
    // this.props.value even though the base type parameter is the bare
    // string — both type errors against ValueObject<T>. Store the raw
    // string so props matches the declared type parameter.
    super(value);
  }

  /** Creates a new TaskId from a random UUID. */
  static create(): TaskId {
    return new TaskId(crypto.randomUUID());
  }

  /** Reconstructs a TaskId from its string form; rejects empty input. */
  static fromString(id: string): TaskId {
    if (!id || id.length === 0) {
      throw new Error('TaskId cannot be empty');
    }
    return new TaskId(id);
  }

  /** The underlying UUID string. */
  get value(): string {
    return this.props;
  }
}
// src/core/domains/task-management/value-objects/task-status.vo.ts
type TaskStatusType = 'pending' | 'assigned' | 'in_progress' | 'completed' | 'failed';
/** Lifecycle state of a Task, exposed via named constructors. */
export class TaskStatus extends ValueObject<TaskStatusType> {
  private constructor(status: TaskStatusType) {
    // BUG FIX: the original called super({ value: status }) and read
    // this.props.value, but the base class is parameterized with the bare
    // TaskStatusType — store the raw status string instead.
    super(status);
  }

  // Named constructors for each lifecycle state.
  static pending(): TaskStatus { return new TaskStatus('pending'); }
  static assigned(): TaskStatus { return new TaskStatus('assigned'); }
  static inProgress(): TaskStatus { return new TaskStatus('in_progress'); }
  static completed(): TaskStatus { return new TaskStatus('completed'); }
  static failed(): TaskStatus { return new TaskStatus('failed'); }

  /** The raw status string. */
  get value(): TaskStatusType {
    return this.props;
  }

  // Convenience state predicates.
  public isPending(): boolean { return this.value === 'pending'; }
  public isAssigned(): boolean { return this.value === 'assigned'; }
  public isInProgress(): boolean { return this.value === 'in_progress'; }
  public isCompleted(): boolean { return this.value === 'completed'; }
  public isFailed(): boolean { return this.value === 'failed'; }
}
// src/core/domains/task-management/value-objects/priority.vo.ts
type PriorityLevel = 'low' | 'medium' | 'high' | 'critical';

/** Value object ranking task urgency; convertible to a numeric weight for sorting. */
export class Priority extends ValueObject<PriorityLevel> {
  // Exhaustive list of legal levels, used to validate persisted values.
  private static readonly LEVELS: PriorityLevel[] = ['low', 'medium', 'high', 'critical'];

  private constructor(level: PriorityLevel) {
    super({ value: level });
  }

  static low(): Priority { return new Priority('low'); }
  static medium(): Priority { return new Priority('medium'); }
  static high(): Priority { return new Priority('high'); }
  static critical(): Priority { return new Priority('critical'); }

  /**
   * Rehydrates a priority from its persisted string form.
   * Required by SqliteTaskRepository.mapRowToTask(), which previously called
   * a method that did not exist on this class.
   * @throws Error when the string is not a known level.
   */
  static fromString(level: string): Priority {
    if (!Priority.LEVELS.includes(level as PriorityLevel)) {
      throw new Error(`Invalid priority level: ${level}`);
    }
    return new Priority(level as PriorityLevel);
  }

  get value(): PriorityLevel {
    return this.props.value;
  }

  /** Numeric weight for comparisons/sorting: low=1, medium=2, high=3, critical=4. */
  public getNumericValue(): number {
    const priorities = { low: 1, medium: 2, high: 3, critical: 4 };
    return priorities[this.value];
  }
}
```
## Domain Services
### Task Scheduling Service
```typescript
// src/core/domains/task-management/services/task-scheduling.service.ts
import { Injectable } from '../../../shared/infrastructure/dependency-container';
import { Task } from '../entities/task.entity';
import { Priority } from '../value-objects/priority.vo';
@Injectable()
export class TaskSchedulingService {
  /**
   * Returns the tasks ordered highest-priority-first.
   * Sorts a copy: Array.prototype.sort is in-place, and the original
   * implementation silently reordered the caller's array as a side effect.
   */
  public prioritizeTasks(tasks: Task[]): Task[] {
    return [...tasks].sort((a, b) =>
      b.priority.getNumericValue() - a.priority.getNumericValue()
    );
  }

  /**
   * Whether the task may be scheduled given the agent's remaining capacity.
   * Critical tasks bypass the capacity heuristic (but still require capacity > 0).
   */
  public canSchedule(task: Task, agentCapacity: number): boolean {
    if (agentCapacity <= 0) return false;
    // Critical tasks always schedulable
    if (task.priority.equals(Priority.critical())) return true;
    // Other logic based on capacity
    return true;
  }

  /**
   * Rough duration estimate in milliseconds: a 5-minute base scaled by
   * priority. Simple heuristic — would use ML in a real implementation.
   */
  public calculateEstimatedDuration(task: Task): number {
    const baseTime = 300000; // 5 minutes
    const priorityMultiplier = {
      low: 0.5,
      medium: 1.0,
      high: 1.5,
      critical: 2.0
    };
    return baseTime * priorityMultiplier[task.priority.value];
  }
}
```
## Repository Interfaces & Implementations
### Task Repository Interface
```typescript
// src/core/domains/task-management/repositories/task.repository.ts
/**
 * Persistence boundary for Task aggregates.
 * Implementations (e.g. SqliteTaskRepository) translate between rows and
 * domain entities; the domain layer depends only on this interface.
 */
export interface ITaskRepository {
  /** Inserts or updates the task (upsert semantics). */
  save(task: Task): Promise<void>;
  /** Looks up a single task; resolves null when the id is unknown. */
  findById(id: TaskId): Promise<Task | null>;
  /** All tasks currently assigned to the given agent. */
  findByAgentId(agentId: string): Promise<Task[]>;
  /** All tasks in the given lifecycle state. */
  findByStatus(status: TaskStatus): Promise<Task[]>;
  /** Pending tasks, ready for scheduling. */
  findPendingTasks(): Promise<Task[]>;
  /** Permanently removes the task. */
  delete(id: TaskId): Promise<void>;
}
```
### SQLite Implementation
```typescript
// src/core/domains/task-management/repositories/sqlite-task.repository.ts
/**
 * SQLite-backed ITaskRepository.
 * The original snippet declared `implements ITaskRepository` but omitted
 * findByAgentId, findByStatus and delete required by the interface — they
 * are implemented below.
 */
@Injectable()
export class SqliteTaskRepository implements ITaskRepository {
  constructor(
    @Inject('Database') private db: Database,
    @Inject('Logger') private logger: ILogger
  ) {}

  /** Upserts the task row; called on every aggregate state change. */
  async save(task: Task): Promise<void> {
    const sql = `
      INSERT OR REPLACE INTO tasks (
        id, description, priority, status, assigned_agent_id, created_at, updated_at
      ) VALUES (?, ?, ?, ?, ?, ?, ?)
    `;
    await this.db.run(sql, [
      task.id.value,
      task.description,
      task.priority.value,
      task.status.value,
      task.assignedAgentId,
      task.createdAt.toISOString(),
      task.updatedAt.toISOString()
    ]);
    this.logger.debug(`Task saved: ${task.id.value}`);
  }

  async findById(id: TaskId): Promise<Task | null> {
    const sql = 'SELECT * FROM tasks WHERE id = ?';
    const row = await this.db.get(sql, [id.value]);
    return row ? this.mapRowToTask(row) : null;
  }

  /** Tasks currently assigned to the given agent. */
  async findByAgentId(agentId: string): Promise<Task[]> {
    const sql = 'SELECT * FROM tasks WHERE assigned_agent_id = ?';
    const rows = await this.db.all(sql, [agentId]);
    return rows.map(row => this.mapRowToTask(row));
  }

  /** Tasks in the given lifecycle state. */
  async findByStatus(status: TaskStatus): Promise<Task[]> {
    const sql = 'SELECT * FROM tasks WHERE status = ?';
    const rows = await this.db.all(sql, [status.value]);
    return rows.map(row => this.mapRowToTask(row));
  }

  /** Pending tasks, highest priority first, oldest first within a priority. */
  async findPendingTasks(): Promise<Task[]> {
    const sql = 'SELECT * FROM tasks WHERE status = ? ORDER BY priority DESC, created_at ASC';
    const rows = await this.db.all(sql, ['pending']);
    return rows.map(row => this.mapRowToTask(row));
  }

  /** Permanently removes the task row. */
  async delete(id: TaskId): Promise<void> {
    await this.db.run('DELETE FROM tasks WHERE id = ?', [id.value]);
    this.logger.debug(`Task deleted: ${id.value}`);
  }

  // Row -> aggregate translation; relies on the value objects' fromString factories.
  private mapRowToTask(row: any): Task {
    return Task.reconstitute({
      id: TaskId.fromString(row.id),
      description: row.description,
      priority: Priority.fromString(row.priority),
      status: TaskStatus.fromString(row.status),
      assignedAgentId: row.assigned_agent_id,
      createdAt: new Date(row.created_at),
      updatedAt: new Date(row.updated_at)
    });
  }
}
```
## Application Layer
### Use Case Implementation
```typescript
// src/core/application/use-cases/assign-task.use-case.ts
/**
 * Application-layer use case: assigns a task to an agent.
 *
 * Coordinates two aggregates (Task, Agent). Ordering matters: both aggregates
 * are persisted before any domain events are published, and events are marked
 * committed only after publishing. Errors are captured and returned as
 * AssignTaskResult.failure rather than thrown to the caller.
 */
@Injectable()
export class AssignTaskUseCase {
  constructor(
    @Inject('TaskRepository') private taskRepository: ITaskRepository,
    @Inject('AgentRepository') private agentRepository: IAgentRepository,
    @Inject('DomainEventBus') private eventBus: DomainEventBus,
    @Inject('Logger') private logger: ILogger
  ) {}

  /**
   * Executes the assignment.
   * @param command Carries the target TaskId and agent id.
   * @returns success with assignment details, or failure wrapping the error.
   */
  async execute(command: AssignTaskCommand): Promise<AssignTaskResult> {
    try {
      // 1. Validate command
      await this.validateCommand(command);
      // 2. Load aggregates
      const task = await this.taskRepository.findById(command.taskId);
      if (!task) {
        throw new TaskNotFoundError(command.taskId);
      }
      const agent = await this.agentRepository.findById(command.agentId);
      if (!agent) {
        throw new AgentNotFoundError(command.agentId);
      }
      // 3. Business logic
      if (!agent.canAcceptTask(task)) {
        throw new AgentCannotAcceptTaskError(command.agentId, command.taskId);
      }
      task.assignTo(command.agentId);
      agent.acceptTask(task.id);
      // 4. Persist changes. NOTE(review): the two saves run concurrently and
      // are not transactional — if one fails the aggregates can diverge.
      await Promise.all([
        this.taskRepository.save(task),
        this.agentRepository.save(agent)
      ]);
      // 5. Publish domain events — only after both saves succeeded
      const events = [
        ...task.getUncommittedEvents(),
        ...agent.getUncommittedEvents()
      ];
      for (const event of events) {
        await this.eventBus.publish(event);
      }
      task.markEventsAsCommitted();
      agent.markEventsAsCommitted();
      // 6. Return result
      this.logger.info(`Task ${command.taskId.value} assigned to agent ${command.agentId}`);
      return AssignTaskResult.success({
        taskId: task.id,
        agentId: command.agentId,
        assignedAt: new Date()
      });
    } catch (error) {
      this.logger.error(`Failed to assign task ${command.taskId.value}:`, error);
      return AssignTaskResult.failure(error);
    }
  }

  // Guards against malformed commands before touching the repositories.
  private async validateCommand(command: AssignTaskCommand): Promise<void> {
    if (!command.taskId) {
      throw new ValidationError('Task ID is required');
    }
    if (!command.agentId) {
      throw new ValidationError('Agent ID is required');
    }
  }
}
```
## Dependency Injection Setup
### Container Configuration
```typescript
// src/core/shared/infrastructure/dependency-container.ts
import { Container } from 'inversify';
import { TYPES } from './types';
/**
 * Thin wrapper around the InversifyJS container that registers every core
 * service as a singleton at construction time.
 */
export class DependencyContainer {
  private container: Container;

  constructor() {
    this.container = new Container();
    this.setupBindings();
  }

  /** Registers repositories, services, use cases and infrastructure. */
  private setupBindings(): void {
    // Repositories
    this.bindSingleton<ITaskRepository>(TYPES.TaskRepository, SqliteTaskRepository);
    this.bindSingleton<IAgentRepository>(TYPES.AgentRepository, SqliteAgentRepository);
    // Services
    this.bindSingleton<TaskSchedulingService>(TYPES.TaskSchedulingService, TaskSchedulingService);
    // Use Cases
    this.bindSingleton<AssignTaskUseCase>(TYPES.AssignTaskUseCase, AssignTaskUseCase);
    // Infrastructure
    this.bindSingleton<ILogger>(TYPES.Logger, ConsoleLogger);
    this.bindSingleton<DomainEventBus>(TYPES.DomainEventBus, InMemoryDomainEventBus);
  }

  // Shared helper: every core binding uses singleton scope.
  private bindSingleton<T>(id: symbol, impl: new (...args: any[]) => T): void {
    this.container.bind<T>(id).to(impl).inSingletonScope();
  }

  /** Resolves a registered service by its symbol. */
  get<T>(serviceIdentifier: symbol): T {
    return this.container.get<T>(serviceIdentifier);
  }

  /** Escape hatch for callers that need to add their own bindings. */
  bind<T>(serviceIdentifier: symbol): BindingToSyntax<T> {
    return this.container.bind<T>(serviceIdentifier);
  }
}
```
## Modern TypeScript Configuration
### Strict TypeScript Setup
```json
// tsconfig.json
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"declaration": true,
"outDir": "./dist",
"strict": true,
"exactOptionalPropertyTypes": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
"noImplicitOverride": true,
"experimentalDecorators": true,
"emitDecoratorMetadata": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"baseUrl": ".",
"paths": {
"@/*": ["src/*"],
"@core/*": ["src/core/*"],
"@shared/*": ["src/core/shared/*"],
"@domains/*": ["src/core/domains/*"]
}
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "**/*.test.ts", "**/*.spec.ts"]
}
```
## Testing Implementation
### Domain Unit Tests
```typescript
// src/core/domains/task-management/__tests__/entities/task.entity.test.ts
// Unit tests for the Task aggregate: creation defaults, assignment rules,
// and the domain events the aggregate raises.
describe('Task Entity', () => {
  let task: Task;

  // Fresh medium-priority task per test so cases stay independent.
  beforeEach(() => {
    task = Task.create('Test task', Priority.medium());
  });

  describe('creation', () => {
    it('should create task with pending status', () => {
      expect(task.status.isPending()).toBe(true);
      expect(task.description).toBe('Test task');
      expect(task.priority.equals(Priority.medium())).toBe(true);
    });
    it('should generate unique ID', () => {
      const task1 = Task.create('Task 1', Priority.low());
      const task2 = Task.create('Task 2', Priority.low());
      expect(task1.id.equals(task2.id)).toBe(false);
    });
  });

  describe('assignment', () => {
    it('should assign to agent and change status', () => {
      const agentId = 'agent-123';
      task.assignTo(agentId);
      expect(task.assignedAgentId).toBe(agentId);
      expect(task.status.isAssigned()).toBe(true);
    });
    it('should emit TaskAssignedEvent when assigned', () => {
      const agentId = 'agent-123';
      task.assignTo(agentId);
      // The aggregate buffers events until a repository commits them.
      const events = task.getUncommittedEvents();
      expect(events).toHaveLength(1);
      expect(events[0]).toBeInstanceOf(TaskAssignedEvent);
    });
    it('should not allow assignment of completed task', () => {
      task.assignTo('agent-123');
      task.complete(TaskResult.success('done'));
      // Completed is a terminal state; re-assignment must be rejected.
      expect(() => task.assignTo('agent-456'))
        .toThrow('Cannot assign completed task');
    });
  });
});
```
### Integration Tests
```typescript
// src/core/domains/task-management/__tests__/integration/task-repository.integration.test.ts
// Integration tests for SqliteTaskRepository against an in-memory SQLite
// database; verifies the row <-> aggregate round trip and query ordering.
describe('TaskRepository Integration', () => {
  let repository: SqliteTaskRepository;
  let db: Database;

  // A fresh ':memory:' database per test keeps cases fully isolated.
  beforeEach(async () => {
    db = new Database(':memory:');
    await setupTasksTable(db);
    repository = new SqliteTaskRepository(db, new ConsoleLogger());
  });

  afterEach(async () => {
    await db.close();
  });

  it('should save and retrieve task', async () => {
    const task = Task.create('Test task', Priority.high());
    await repository.save(task);
    const retrieved = await repository.findById(task.id);
    // Round trip must preserve identity and every value object.
    expect(retrieved).toBeDefined();
    expect(retrieved!.id.equals(task.id)).toBe(true);
    expect(retrieved!.description).toBe('Test task');
    expect(retrieved!.priority.equals(Priority.high())).toBe(true);
  });

  it('should find pending tasks ordered by priority', async () => {
    const lowTask = Task.create('Low priority', Priority.low());
    const highTask = Task.create('High priority', Priority.high());
    await repository.save(lowTask);
    await repository.save(highTask);
    const pending = await repository.findPendingTasks();
    expect(pending).toHaveLength(2);
    expect(pending[0].id.equals(highTask.id)).toBe(true); // High priority first
    expect(pending[1].id.equals(lowTask.id)).toBe(true);
  });
});
```
## Performance Optimizations
### Entity Caching
```typescript
// src/core/shared/infrastructure/entity-cache.ts
/**
 * In-memory TTL cache for domain entities, keyed by entity id.
 * Expiry is enforced lazily on get(); there is no background sweeper, so
 * entries that are never read again stay in the map until invalidated.
 */
@Injectable()
export class EntityCache<T extends Entity<any>> {
  private cache = new Map<string, { entity: T; timestamp: number }>();

  /**
   * @param ttl Entry lifetime in milliseconds. Defaults to 5 minutes;
   *            previously hard-coded — now configurable for tests and tuning.
   */
  constructor(private readonly ttl: number = 300000) {}

  /** Stores (or refreshes) an entity, restarting its TTL. */
  set(id: string, entity: T): void {
    this.cache.set(id, { entity, timestamp: Date.now() });
  }

  /** Returns the cached entity, or null when absent or expired. */
  get(id: string): T | null {
    const cached = this.cache.get(id);
    if (!cached) return null;
    // Lazy expiry: evict on read once the entry has outlived its TTL.
    if (Date.now() - cached.timestamp > this.ttl) {
      this.cache.delete(id);
      return null;
    }
    return cached.entity;
  }

  /** Drops a single entry (e.g. after the entity is updated elsewhere). */
  invalidate(id: string): void {
    this.cache.delete(id);
  }

  /** Empties the cache entirely. */
  clear(): void {
    this.cache.clear();
  }
}
```
## Success Metrics
- [ ] **Domain Isolation**: 100% clean dependency boundaries
- [ ] **Test Coverage**: >90% unit test coverage for domain logic
- [ ] **Type Safety**: Strict TypeScript compilation with zero any types
- [ ] **Performance**: <50ms average use case execution time
- [ ] **Memory Efficiency**: <100MB heap usage for core domains
- [ ] **Plugin Architecture**: Modular domain loading capability
## Related V3 Skills
- `v3-ddd-architecture` - DDD architectural design
- `v3-mcp-optimization` - MCP server integration
- `v3-memory-unification` - AgentDB repository integration
- `v3-swarm-coordination` - Swarm domain implementation
## Usage Examples
### Complete Core Implementation
```bash
# Full core module implementation
Task("Core implementation",
"Implement all core domains with DDD patterns and comprehensive testing",
"core-implementer")
```
### Domain-Specific Implementation
```bash
# Single domain implementation
Task("Task domain implementation",
"Implement task management domain with entities, services, and repositories",
"core-implementer")
```

<!-- file boundary: next skill document -->
---
name: "V3 DDD Architecture"
description: "Domain-Driven Design architecture for Codex-flow v3. Implements modular, bounded context architecture with clean separation of concerns and microkernel pattern."
---
# V3 DDD Architecture
## What This Skill Does
Designs and implements Domain-Driven Design (DDD) architecture for Codex-flow v3, decomposing god objects into bounded contexts, implementing clean architecture patterns, and enabling modular, testable code structure.
## Quick Start
```bash
# Initialize DDD architecture analysis
Task("Architecture analysis", "Analyze current architecture and design DDD boundaries", "core-architect")
# Domain modeling (parallel)
Task("Domain decomposition", "Break down orchestrator god object into domains", "core-architect")
Task("Context mapping", "Map bounded contexts and relationships", "core-architect")
Task("Interface design", "Design clean domain interfaces", "core-architect")
```
## DDD Implementation Strategy
### Current Architecture Analysis
```
├── PROBLEMATIC: core/orchestrator.ts (1,440 lines - GOD OBJECT)
│ ├── Task management responsibilities
│ ├── Session management responsibilities
│ ├── Health monitoring responsibilities
│ ├── Lifecycle management responsibilities
│ └── Event coordination responsibilities
└── TARGET: Modular DDD Architecture
├── core/domains/
│ ├── task-management/
│ ├── session-management/
│ ├── health-monitoring/
│ ├── lifecycle-management/
│ └── event-coordination/
└── core/shared/
├── interfaces/
├── value-objects/
└── domain-events/
```
### Domain Boundaries
#### 1. Task Management Domain
```typescript
// core/domains/task-management/
interface TaskManagementDomain {
// Entities
Task: TaskEntity;
TaskQueue: TaskQueueEntity;
// Value Objects
TaskId: TaskIdVO;
TaskStatus: TaskStatusVO;
Priority: PriorityVO;
// Services
TaskScheduler: TaskSchedulingService;
TaskValidator: TaskValidationService;
// Repository
TaskRepository: ITaskRepository;
}
```
#### 2. Session Management Domain
```typescript
// core/domains/session-management/
interface SessionManagementDomain {
// Entities
Session: SessionEntity;
SessionState: SessionStateEntity;
// Value Objects
SessionId: SessionIdVO;
SessionStatus: SessionStatusVO;
// Services
SessionLifecycle: SessionLifecycleService;
SessionPersistence: SessionPersistenceService;
// Repository
SessionRepository: ISessionRepository;
}
```
#### 3. Health Monitoring Domain
```typescript
// core/domains/health-monitoring/
interface HealthMonitoringDomain {
// Entities
HealthCheck: HealthCheckEntity;
Metric: MetricEntity;
// Value Objects
HealthStatus: HealthStatusVO;
Threshold: ThresholdVO;
// Services
HealthCollector: HealthCollectionService;
AlertManager: AlertManagementService;
// Repository
MetricsRepository: IMetricsRepository;
}
```
## Microkernel Architecture Pattern
### Core Kernel
```typescript
// core/kernel/Codex-flow-kernel.ts
/**
 * Microkernel: owns the registry of loaded domains and hands them out by name.
 * NOTE(review): eventBus and dependencyContainer are declared but never
 * initialized in this snippet — confirm they are wired up elsewhere before
 * loadDomain() is called.
 */
export class ClaudeFlowKernel {
  private domains: Map<string, Domain> = new Map();
  private eventBus: DomainEventBus;
  private dependencyContainer: Container;

  /** Loads the core domains in order, then wires cross-domain event handlers. */
  async initialize(): Promise<void> {
    // Load core domains
    await this.loadDomain('task-management', new TaskManagementDomain());
    await this.loadDomain('session-management', new SessionManagementDomain());
    await this.loadDomain('health-monitoring', new HealthMonitoringDomain());
    // Wire up domain events
    this.setupDomainEventHandlers();
  }

  /** Initializes a domain against the shared DI container and registers it. */
  async loadDomain(name: string, domain: Domain): Promise<void> {
    await domain.initialize(this.dependencyContainer);
    this.domains.set(name, domain);
  }

  /** Typed lookup; throws DomainNotLoadedError for unknown names. */
  getDomain<T extends Domain>(name: string): T {
    const domain = this.domains.get(name);
    if (!domain) {
      throw new DomainNotLoadedError(name);
    }
    return domain as T;
  }
}
```
### Plugin Architecture
```typescript
// core/plugins/
interface DomainPlugin {
name: string;
version: string;
dependencies: string[];
initialize(kernel: ClaudeFlowKernel): Promise<void>;
shutdown(): Promise<void>;
}
// Example: Swarm Coordination Plugin
/**
 * Optional plugin layering swarm coordination on top of the task and session
 * domains. Fixes from review: the original assigned an undeclared
 * `swarmCoordinator` field and omitted the shutdown() method required by the
 * DomainPlugin interface.
 */
export class SwarmCoordinationPlugin implements DomainPlugin {
  name = 'swarm-coordination';
  version = '3.0.0';
  dependencies = ['task-management', 'session-management'];

  // Created during initialize(); undefined until then and after shutdown().
  private swarmCoordinator?: UnifiedSwarmCoordinator;

  async initialize(kernel: ClaudeFlowKernel): Promise<void> {
    const taskDomain = kernel.getDomain<TaskManagementDomain>('task-management');
    const sessionDomain = kernel.getDomain<SessionManagementDomain>('session-management');
    // Register swarm coordination services
    this.swarmCoordinator = new UnifiedSwarmCoordinator(taskDomain, sessionDomain);
    kernel.registerService('swarm-coordinator', this.swarmCoordinator);
  }

  /** Required by DomainPlugin; releases the coordinator reference. */
  async shutdown(): Promise<void> {
    this.swarmCoordinator = undefined;
  }
}
```
## Domain Events & Integration
### Event-Driven Communication
```typescript
// core/shared/domain-events/
/**
 * Base class for all domain events. Stamps a unique event id, the emitting
 * aggregate's id, and the time of occurrence at construction.
 */
abstract class DomainEvent {
  public readonly eventId: string;
  public readonly aggregateId: string;
  public readonly occurredOn: Date;
  public readonly eventVersion: number; // event schema version, fixed at 1 for now

  constructor(aggregateId: string) {
    this.eventId = crypto.randomUUID();
    this.aggregateId = aggregateId;
    this.occurredOn = new Date();
    this.eventVersion = 1;
  }
}
// Task domain events
export class TaskAssignedEvent extends DomainEvent {
constructor(
taskId: string,
public readonly agentId: string,
public readonly priority: Priority
) {
super(taskId);
}
}
export class TaskCompletedEvent extends DomainEvent {
constructor(
taskId: string,
public readonly result: TaskResult,
public readonly duration: number
) {
super(taskId);
}
}
// Event handlers
/**
 * Reacts to TaskCompletedEvent: records the completion metric, then updates
 * the session state. Registered via the @EventHandler decorator.
 */
@EventHandler(TaskCompletedEvent)
export class TaskCompletedHandler {
  constructor(
    private metricsRepository: IMetricsRepository,
    private sessionService: SessionLifecycleService
  ) {}

  async handle(event: TaskCompletedEvent): Promise<void> {
    // Update metrics (aggregateId is the task id — set by the event base class)
    await this.metricsRepository.recordTaskCompletion(
      event.aggregateId,
      event.duration
    );
    // Update session state
    await this.sessionService.markTaskCompleted(
      event.aggregateId,
      event.result
    );
  }
}
```
## Clean Architecture Layers
```typescript
// Architecture layers
┌─────────────────────────────────────────┐
│ Presentation │ ← CLI, API, UI
├─────────────────────────────────────────┤
│ Application │ ← Use Cases, Commands
├─────────────────────────────────────────┤
│ Domain │ ← Entities, Services, Events
├─────────────────────────────────────────┤
│ Infrastructure │ ← DB, MCP, External APIs
└─────────────────────────────────────────┘
// Dependency direction: Outside → Inside
// Domain layer has NO external dependencies
```
### Application Layer (Use Cases)
```typescript
// core/application/use-cases/
export class AssignTaskUseCase {
constructor(
private taskRepository: ITaskRepository,
private agentRepository: IAgentRepository,
private eventBus: DomainEventBus
) {}
async execute(command: AssignTaskCommand): Promise<TaskResult> {
// 1. Validate command
await this.validateCommand(command);
// 2. Load aggregates
const task = await this.taskRepository.findById(command.taskId);
const agent = await this.agentRepository.findById(command.agentId);
// 3. Business logic (in domain)
task.assignTo(agent);
// 4. Persist changes
await this.taskRepository.save(task);
// 5. Publish domain events
task.getUncommittedEvents().forEach(event =>
this.eventBus.publish(event)
);
// 6. Return result
return TaskResult.success(task);
}
}
```
## Module Configuration
### Bounded Context Modules
```typescript
// core/domains/task-management/module.ts
export const taskManagementModule = {
name: 'task-management',
entities: [
TaskEntity,
TaskQueueEntity
],
valueObjects: [
TaskIdVO,
TaskStatusVO,
PriorityVO
],
services: [
TaskSchedulingService,
TaskValidationService
],
repositories: [
{ provide: ITaskRepository, useClass: SqliteTaskRepository }
],
eventHandlers: [
TaskAssignedHandler,
TaskCompletedHandler
]
};
```
## Migration Strategy
### Phase 1: Extract Domain Services
```typescript
// Extract services from orchestrator.ts
const extractionPlan = {
week1: [
'TaskManager → task-management domain',
'SessionManager → session-management domain'
],
week2: [
'HealthMonitor → health-monitoring domain',
'LifecycleManager → lifecycle-management domain'
],
week3: [
'EventCoordinator → event-coordination domain',
'Wire up domain events'
]
};
```
### Phase 2: Implement Clean Interfaces
```typescript
// Clean separation with dependency injection
export class TaskController {
constructor(
@Inject('AssignTaskUseCase') private assignTask: AssignTaskUseCase,
@Inject('CompleteTaskUseCase') private completeTask: CompleteTaskUseCase
) {}
async assign(request: AssignTaskRequest): Promise<TaskResponse> {
const command = AssignTaskCommand.fromRequest(request);
const result = await this.assignTask.execute(command);
return TaskResponse.fromResult(result);
}
}
```
### Phase 3: Plugin System
```typescript
// Enable plugin-based extensions
const pluginSystem = {
core: ['task-management', 'session-management', 'health-monitoring'],
optional: ['swarm-coordination', 'learning-integration', 'performance-monitoring']
};
```
## Testing Strategy
### Domain Testing (London School TDD)
```typescript
// Pure domain logic testing
describe('Task Entity', () => {
let task: TaskEntity;
let mockAgent: jest.Mocked<AgentEntity>;
beforeEach(() => {
task = new TaskEntity(TaskId.create(), 'Test task');
mockAgent = createMock<AgentEntity>();
});
it('should assign to agent when valid', () => {
mockAgent.canAcceptTask.mockReturnValue(true);
task.assignTo(mockAgent);
expect(task.assignedAgent).toBe(mockAgent);
expect(task.status.value).toBe('assigned');
});
it('should emit TaskAssignedEvent when assigned', () => {
mockAgent.canAcceptTask.mockReturnValue(true);
task.assignTo(mockAgent);
const events = task.getUncommittedEvents();
expect(events).toHaveLength(1);
expect(events[0]).toBeInstanceOf(TaskAssignedEvent);
});
});
```
## Success Metrics
- [ ] **God Object Elimination**: orchestrator.ts (1,440 lines) → 5 focused domains (<300 lines each)
- [ ] **Bounded Context Isolation**: 100% domain independence
- [ ] **Plugin Architecture**: Core + optional modules loading
- [ ] **Clean Architecture**: Dependency inversion maintained
- [ ] **Event-Driven Communication**: Loose coupling between domains
- [ ] **Test Coverage**: >90% domain logic coverage
## Related V3 Skills
- `v3-core-implementation` - Implementation of DDD domains
- `v3-memory-unification` - AgentDB integration within bounded contexts
- `v3-swarm-coordination` - Swarm coordination as domain plugin
- `v3-performance-optimization` - Performance optimization across domains
## Usage Examples
### Complete Domain Extraction
```bash
# Full DDD architecture implementation
Task("DDD architecture implementation",
"Extract orchestrator into DDD domains with clean architecture",
"core-architect")
```
### Plugin Development
```bash
# Create domain plugin
npm run create:plugin -- --name swarm-coordination --template domain
```

<!-- file boundary: next skill document -->
---
name: "V3 Deep Integration"
description: "Deep agentic-flow@alpha integration implementing ADR-001. Eliminates 10,000+ duplicate lines by building Codex-flow as specialized extension rather than parallel implementation."
---
# V3 Deep Integration
## What This Skill Does
Transforms Codex-flow from parallel implementation to specialized extension of agentic-flow@alpha, eliminating massive code duplication while achieving performance improvements and feature parity.
## Quick Start
```bash
# Initialize deep integration
Task("Integration architecture", "Design agentic-flow@alpha adapter layer", "v3-integration-architect")
# Feature integration (parallel)
Task("SONA integration", "Integrate 5 SONA learning modes", "v3-integration-architect")
Task("Flash Attention", "Implement 2.49x-7.47x speedup", "v3-integration-architect")
Task("AgentDB coordination", "Setup 150x-12,500x search", "v3-integration-architect")
```
## Code Deduplication Strategy
### Current Overlap → Integration
```
┌─────────────────────────────────────────┐
│ Codex-flow agentic-flow │
├─────────────────────────────────────────┤
│ SwarmCoordinator → Swarm System │ 80% overlap (eliminate)
│ AgentManager → Agent Lifecycle │ 70% overlap (eliminate)
│ TaskScheduler → Task Execution │ 60% overlap (eliminate)
│ SessionManager → Session Mgmt │ 50% overlap (eliminate)
└─────────────────────────────────────────┘
TARGET: <5,000 lines (vs 15,000+ currently)
```
## agentic-flow@alpha Feature Integration
### SONA Learning Modes
```typescript
/** Bridges Codex-flow to agentic-flow's SONA learning modes. */
class SONAIntegration {
  /**
   * Switches the underlying agentic-flow instance to the requested mode.
   * NOTE(review): the switch cases below are empty and fall through — they
   * only document the available modes; the actual change is the single
   * setMode call after the switch. Confirm this is intentional.
   */
  async initializeMode(mode: SONAMode): Promise<void> {
    switch(mode) {
      case 'real-time': // ~0.05ms adaptation
      case 'balanced': // general purpose
      case 'research': // deep exploration
      case 'edge': // resource-constrained
      case 'batch': // high-throughput
    }
    await this.agenticFlow.sona.setMode(mode);
  }
}
```
### Flash Attention Integration
```typescript
class FlashAttentionIntegration {
async optimizeAttention(): Promise<AttentionResult> {
return this.agenticFlow.attention.flashAttention({
speedupTarget: '2.49x-7.47x',
memoryReduction: '50-75%',
mechanisms: ['multi-head', 'linear', 'local', 'global']
});
}
}
```
### AgentDB Coordination
```typescript
class AgentDBIntegration {
async setupCrossAgentMemory(): Promise<void> {
await this.agentdb.enableCrossAgentSharing({
indexType: 'HNSW',
speedupTarget: '150x-12500x',
dimensions: 1536
});
}
}
```
### MCP Tools Integration
```typescript
class MCPToolsIntegration {
async integrateBuiltinTools(): Promise<void> {
// Leverage 213 pre-built tools
const tools = await this.agenticFlow.mcp.getAvailableTools();
await this.registerClaudeFlowSpecificTools(tools);
// Use 19 hook types
const hookTypes = await this.agenticFlow.hooks.getTypes();
await this.configureClaudeFlowHooks(hookTypes);
}
}
```
## Migration Implementation
### Phase 1: Adapter Layer
```typescript
import { Agent as AgenticFlowAgent } from 'agentic-flow@alpha';
export class ClaudeFlowAgent extends AgenticFlowAgent {
async handleClaudeFlowTask(task: ClaudeTask): Promise<TaskResult> {
return this.executeWithSONA(task);
}
// Backward compatibility
async legacyCompatibilityLayer(oldAPI: any): Promise<any> {
return this.adaptToNewAPI(oldAPI);
}
}
```
### Phase 2: System Migration
```typescript
/**
 * Replaces Codex-flow's duplicated subsystems with their agentic-flow
 * equivalents, one subsystem at a time (swarm, agents, tasks).
 */
class SystemMigration {
  /** Swaps SwarmCoordinator for agentic-flow's swarm system. */
  async migrateSwarmCoordination(): Promise<void> {
    // Replace SwarmCoordinator (800+ lines) with agentic-flow Swarm
    const swarmConfig = await this.extractSwarmConfig();
    await this.agenticFlow.swarm.initialize(swarmConfig);
  }

  /** Re-creates each live agent inside agentic-flow's lifecycle manager. */
  async migrateAgentManagement(): Promise<void> {
    // Replace AgentManager (1,736+ lines) with agentic-flow lifecycle
    const agents = await this.extractActiveAgents();
    for (const agent of agents) {
      await this.agenticFlow.agent.create(agent);
    }
  }

  /** Hands the outstanding task set to agentic-flow as a task graph. */
  async migrateTaskExecution(): Promise<void> {
    // Replace TaskScheduler with agentic-flow task graph
    const tasks = await this.extractTasks();
    await this.agenticFlow.task.executeGraph(this.buildTaskGraph(tasks));
  }
}
```
### Phase 3: Cleanup
```typescript
class CodeCleanup {
async removeDeprecatedCode(): Promise<void> {
// Remove massive duplicate implementations
await this.removeFile('src/core/SwarmCoordinator.ts'); // 800+ lines
await this.removeFile('src/agents/AgentManager.ts'); // 1,736+ lines
await this.removeFile('src/task/TaskScheduler.ts'); // 500+ lines
// Total reduction: 10,000+ → <5,000 lines
}
}
```
## RL Algorithm Integration
```typescript
class RLIntegration {
algorithms = [
'PPO', 'DQN', 'A2C', 'MCTS', 'Q-Learning',
'SARSA', 'Actor-Critic', 'Decision-Transformer'
];
async optimizeAgentBehavior(): Promise<void> {
for (const algorithm of this.algorithms) {
await this.agenticFlow.rl.train(algorithm, {
episodes: 1000,
rewardFunction: this.claudeFlowRewardFunction
});
}
}
}
```
## Performance Integration
### Flash Attention Targets
```typescript
const attentionBenchmark = {
baseline: 'current attention mechanism',
target: '2.49x-7.47x improvement',
memoryReduction: '50-75%',
implementation: 'agentic-flow@alpha Flash Attention'
};
```
### AgentDB Search Performance
```typescript
const searchBenchmark = {
baseline: 'linear search in current systems',
target: '150x-12,500x via HNSW indexing',
implementation: 'agentic-flow@alpha AgentDB'
};
```
## Backward Compatibility
### Gradual Migration
```typescript
class BackwardCompatibility {
// Phase 1: Dual operation
async enableDualOperation(): Promise<void> {
this.oldSystem.continue();
this.newSystem.initialize();
this.syncState(this.oldSystem, this.newSystem);
}
// Phase 2: Feature-by-feature migration
async migrateGradually(): Promise<void> {
const features = this.getAllFeatures();
for (const feature of features) {
await this.migrateFeature(feature);
await this.validateFeatureParity(feature);
}
}
// Phase 3: Complete transition
async completeTransition(): Promise<void> {
await this.validateFullParity();
await this.deprecateOldSystem();
}
}
```
## Success Metrics
- **Code Reduction**: <5,000 lines orchestration (vs 15,000+)
- **Performance**: 2.49x-7.47x Flash Attention speedup
- **Search**: 150x-12,500x AgentDB improvement
- **Memory**: 50-75% usage reduction
- **Feature Parity**: 100% v2 functionality maintained
- **SONA**: <0.05ms adaptation time
- **Integration**: All 213 MCP tools + 19 hook types available
## Related V3 Skills
- `v3-memory-unification` - Memory system integration
- `v3-performance-optimization` - Performance target validation
- `v3-swarm-coordination` - Swarm system migration
- `v3-security-overhaul` - Secure integration patterns

<!-- file boundary: next skill document -->
---
name: "V3 MCP Optimization"
description: "MCP server optimization and transport layer enhancement for Codex-flow v3. Implements connection pooling, load balancing, tool registry optimization, and performance monitoring for sub-100ms response times."
---
# V3 MCP Optimization
## What This Skill Does
Optimizes Codex-flow v3 MCP (Model Context Protocol) server implementation with advanced transport layer optimizations, connection pooling, load balancing, and comprehensive performance monitoring to achieve sub-100ms response times.
## Quick Start
```bash
# Initialize MCP optimization analysis
Task("MCP architecture", "Analyze current MCP server performance and bottlenecks", "mcp-specialist")
# Optimization implementation (parallel)
Task("Connection pooling", "Implement MCP connection pooling and reuse", "mcp-specialist")
Task("Load balancing", "Add dynamic load balancing for MCP tools", "mcp-specialist")
Task("Transport optimization", "Optimize transport layer performance", "mcp-specialist")
```
## MCP Performance Architecture
### Current State Analysis
```
Current MCP Issues:
├── Cold Start Latency: ~1.8s MCP server init
├── Connection Overhead: New connection per request
├── Tool Registry: Linear search O(n) for 213+ tools
├── Transport Layer: No connection reuse
└── Memory Usage: No cleanup of idle connections
Target Performance:
├── Startup Time: <400ms (4.5x improvement)
├── Tool Lookup: <5ms (O(1) hash table)
├── Connection Reuse: 90%+ connection pool hits
├── Response Time: <100ms p95
└── Memory Efficiency: 50% reduction
```
### MCP Server Architecture
```typescript
// src/core/mcp/mcp-server.ts
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
// Tuning knobs for the optimized MCP server, grouped by concern.
interface OptimizedMCPConfig {
  // Connection pooling
  maxConnections: number;          // hard cap on pooled connections
  idleTimeoutMs: number;           // idle entries older than this are not reused
  connectionReuseEnabled: boolean;
  // Tool registry
  toolCacheEnabled: boolean;
  toolIndexType: 'hash' | 'trie';  // lookup structure for the tool registry
  // Performance
  requestTimeoutMs: number;
  batchingEnabled: boolean;        // allow low-priority messages to be batched
  compressionEnabled: boolean;
  // Monitoring
  metricsEnabled: boolean;
  healthCheckIntervalMs: number;
}
export class OptimizedMCPServer {
private server: Server;
private connectionPool: ConnectionPool;
private toolRegistry: FastToolRegistry;
private loadBalancer: MCPLoadBalancer;
private metrics: MCPMetrics;
constructor(config: OptimizedMCPConfig) {
this.server = new Server({
name: 'Codex-flow-v3',
version: '3.0.0'
}, {
capabilities: {
tools: { listChanged: true },
resources: { subscribe: true, listChanged: true },
prompts: { listChanged: true }
}
});
this.connectionPool = new ConnectionPool(config);
this.toolRegistry = new FastToolRegistry(config.toolIndexType);
this.loadBalancer = new MCPLoadBalancer();
this.metrics = new MCPMetrics(config.metricsEnabled);
}
async start(): Promise<void> {
// Pre-warm connection pool
await this.connectionPool.preWarm();
// Pre-build tool index
await this.toolRegistry.buildIndex();
// Setup request handlers with optimizations
this.setupOptimizedHandlers();
// Start health monitoring
this.startHealthMonitoring();
// Start server
const transport = new StdioServerTransport();
await this.server.connect(transport);
this.metrics.recordStartup();
}
}
```
## Connection Pool Implementation
### Advanced Connection Pooling
```typescript
// src/core/mcp/connection-pool.ts
// A pooled MCP connection plus the bookkeeping the pool needs for reuse,
// eviction and retirement decisions.
interface PooledConnection {
  id: string;                 // pool-internal id (not the transport's)
  connection: MCPConnection;
  lastUsed: number;           // epoch ms; drives idle-timeout checks and eviction
  usageCount: number;         // times handed out; drives retirement
  isHealthy: boolean;         // maintained by the pool's health checker
}
// Reusable MCP connection pool: hands out healthy, non-idle connections,
// creates new ones on misses, and evicts/retires entries to stay bounded.
export class ConnectionPool {
  // Pool entries keyed by pool-internal id.
  private pool: Map<string, PooledConnection> = new Map();
  private readonly config: ConnectionPoolConfig;
  private healthChecker: HealthChecker;

  constructor(config: ConnectionPoolConfig) {
    // Caller-supplied values override these defaults.
    this.config = {
      maxConnections: 50,
      minConnections: 5,
      idleTimeoutMs: 300000, // 5 minutes
      maxUsageCount: 1000,
      healthCheckIntervalMs: 30000,
      ...config
    };
    this.healthChecker = new HealthChecker(this.config.healthCheckIntervalMs);
  }

  // Returns a pooled connection for `endpoint`, creating one on a miss.
  // NOTE(review): there is no in-use flag, so two concurrent callers can be
  // handed the same pooled connection — confirm MCPConnection is safe to share.
  async getConnection(endpoint: string): Promise<MCPConnection> {
    const start = performance.now();
    // Try to get from pool first
    const pooled = this.findAvailableConnection(endpoint);
    if (pooled) {
      pooled.lastUsed = Date.now();
      pooled.usageCount++;
      this.recordMetric('pool_hit', performance.now() - start);
      return pooled.connection;
    }
    // Check pool capacity
    if (this.pool.size >= this.config.maxConnections) {
      await this.evictLeastUsedConnection();
    }
    // Create new connection
    const connection = await this.createConnection(endpoint);
    const pooledConn: PooledConnection = {
      id: this.generateConnectionId(),
      connection,
      lastUsed: Date.now(),
      usageCount: 1,
      isHealthy: true
    };
    this.pool.set(pooledConn.id, pooledConn);
    this.recordMetric('pool_miss', performance.now() - start);
    return connection;
  }

  // Retires a connection that has exceeded its usage budget.
  // NOTE(review): despite the comment below, nothing is actually marked
  // available here — reuse is governed solely by idleTimeoutMs in
  // findAvailableConnection. Confirm that is intended.
  async releaseConnection(connection: MCPConnection): Promise<void> {
    // Mark connection as available for reuse
    const pooled = this.findConnectionById(connection.id);
    if (pooled) {
      // Check if connection should be retired
      if (pooled.usageCount >= this.config.maxUsageCount) {
        await this.removeConnection(pooled.id);
      }
    }
  }

  // Opens config.minConnections connections up front so the first requests
  // do not pay connection-setup latency.
  async preWarm(): Promise<void> {
    const connections: Promise<MCPConnection>[] = [];
    for (let i = 0; i < this.config.minConnections; i++) {
      connections.push(this.createConnection('default'));
    }
    await Promise.all(connections);
  }

  // Evicts the least-RECENTLY-used entry (oldest lastUsed) — not the entry
  // with the lowest usageCount, as the method name suggests.
  private async evictLeastUsedConnection(): Promise<void> {
    let oldestConn: PooledConnection | null = null;
    let oldestTime = Date.now();
    for (const conn of this.pool.values()) {
      if (conn.lastUsed < oldestTime) {
        oldestTime = conn.lastUsed;
        oldestConn = conn;
      }
    }
    if (oldestConn) {
      await this.removeConnection(oldestConn.id);
    }
  }

  // A connection is reusable when it is healthy, matches the endpoint and
  // was last used within idleTimeoutMs.
  private findAvailableConnection(endpoint: string): PooledConnection | null {
    for (const conn of this.pool.values()) {
      if (conn.isHealthy &&
          conn.connection.endpoint === endpoint &&
          Date.now() - conn.lastUsed < this.config.idleTimeoutMs) {
        return conn;
      }
    }
    return null;
  }
}
```
## Fast Tool Registry
### O(1) Tool Lookup Implementation
```typescript
// src/core/mcp/fast-tool-registry.ts
// Registry record for one tool: its handler plus live usage statistics.
interface ToolIndexEntry {
  name: string;
  handler: ToolHandler;
  metadata: ToolMetadata;
  usageCount: number;    // invocations since the index was built
  avgLatencyMs: number;  // running average execution latency
}
// Tool registry with O(1) exact lookup, an LRU hot-cache, a category index
// and a fuzzy fallback for near-miss tool names.
export class FastToolRegistry {
  private toolIndex: Map<string, ToolIndexEntry> = new Map();
  private categoryIndex: Map<string, string[]> = new Map();
  private fuzzyMatcher: FuzzyMatcher;
  private cache: LRUCache<string, ToolIndexEntry>;
  // Retained for index (re)builds — the original accepted this parameter
  // and silently discarded it.
  private readonly indexType: 'hash' | 'trie';

  constructor(indexType: 'hash' | 'trie' = 'hash') {
    this.indexType = indexType;
    this.fuzzyMatcher = new FuzzyMatcher();
    this.cache = new LRUCache<string, ToolIndexEntry>(1000); // Cache 1000 most used tools
  }

  // Loads every tool and builds the exact, category and fuzzy indexes.
  async buildIndex(): Promise<void> {
    const start = performance.now();
    // Load all available tools
    const tools = await this.loadAllTools();
    // Build hash index for O(1) lookup
    for (const tool of tools) {
      const entry: ToolIndexEntry = {
        name: tool.name,
        handler: tool.handler,
        metadata: tool.metadata,
        usageCount: 0,
        avgLatencyMs: 0
      };
      this.toolIndex.set(tool.name, entry);
      // Build category index
      const category = tool.metadata.category || 'general';
      if (!this.categoryIndex.has(category)) {
        this.categoryIndex.set(category, []);
      }
      this.categoryIndex.get(category)!.push(tool.name);
    }
    // Build fuzzy search index
    await this.fuzzyMatcher.buildIndex(tools.map(t => t.name));
    console.log(`Tool index built in ${(performance.now() - start).toFixed(2)}ms for ${tools.length} tools`);
  }

  // Cache → exact → fuzzy. Fuzzy hits are cached under the *requested* name
  // so repeated near-misses stay O(1).
  findTool(name: string): ToolIndexEntry | null {
    // Try cache first
    const cached = this.cache.get(name);
    if (cached) return cached;
    // Try exact match
    const exact = this.toolIndex.get(name);
    if (exact) {
      this.cache.set(name, exact);
      return exact;
    }
    // Try fuzzy match
    const fuzzyMatches = this.fuzzyMatcher.search(name, 1);
    if (fuzzyMatches.length > 0) {
      const match = this.toolIndex.get(fuzzyMatches[0]);
      if (match) {
        this.cache.set(name, match);
        return match;
      }
    }
    return null;
  }

  findToolsByCategory(category: string): ToolIndexEntry[] {
    const toolNames = this.categoryIndex.get(category) || [];
    return toolNames
      .map(name => this.toolIndex.get(name))
      .filter(entry => entry !== undefined) as ToolIndexEntry[];
  }

  getMostUsedTools(limit: number = 10): ToolIndexEntry[] {
    return Array.from(this.toolIndex.values())
      .sort((a, b) => b.usageCount - a.usageCount)
      .slice(0, limit);
  }

  // Records one invocation. avgLatencyMs is now a true running mean —
  // the original's (old + new) / 2 over-weighted the latest sample and
  // halved the very first one against the initial 0.
  recordToolUsage(toolName: string, latencyMs: number): void {
    const entry = this.toolIndex.get(toolName);
    if (entry) {
      entry.usageCount++;
      entry.avgLatencyMs += (latencyMs - entry.avgLatencyMs) / entry.usageCount;
    }
  }
}
```
## Load Balancing & Request Distribution
### Intelligent Load Balancer
```typescript
// src/core/mcp/load-balancer.ts
// One routable MCP server endpoint plus the live stats routing decisions use.
interface ServerInstance {
  id: string;
  endpoint: string;
  load: number;               // NOTE(review): not read by any strategy below — confirm it is still needed
  responseTime: number;       // ms; used by 'response-time' and 'weighted'
  isHealthy: boolean;         // unhealthy instances are never selected
  maxConnections: number;
  currentConnections: number; // used by 'least-connections' and 'weighted'
}
// Routes MCP requests across healthy server instances using a configurable
// strategy (default: least-connections).
export class MCPLoadBalancer {
  private servers: Map<string, ServerInstance> = new Map();
  private routingStrategy: RoutingStrategy = 'least-connections';
  // Cursor for round-robin; wraps via modulo in roundRobinSelection.
  private rrCursor = 0;

  addServer(server: ServerInstance): void {
    this.servers.set(server.id, server);
  }

  // Picks a healthy server per the active strategy, or null when none exist.
  selectServer(toolCategory?: string): ServerInstance | null {
    const healthyServers = Array.from(this.servers.values())
      .filter(server => server.isHealthy);
    if (healthyServers.length === 0) return null;
    switch (this.routingStrategy) {
      case 'round-robin':
        return this.roundRobinSelection(healthyServers);
      case 'least-connections':
        return this.leastConnectionsSelection(healthyServers);
      case 'response-time':
        return this.responseTimeSelection(healthyServers);
      case 'weighted':
        return this.weightedSelection(healthyServers, toolCategory);
      default:
        return healthyServers[0];
    }
  }

  // Was referenced but never implemented in the original — selecting the
  // 'round-robin' strategy would have thrown at runtime.
  private roundRobinSelection(servers: ServerInstance[]): ServerInstance {
    const pick = servers[this.rrCursor % servers.length];
    this.rrCursor = (this.rrCursor + 1) % servers.length;
    return pick;
  }

  private leastConnectionsSelection(servers: ServerInstance[]): ServerInstance {
    return servers.reduce((least, current) =>
      current.currentConnections < least.currentConnections ? current : least
    );
  }

  private responseTimeSelection(servers: ServerInstance[]): ServerInstance {
    return servers.reduce((fastest, current) =>
      current.responseTime < fastest.responseTime ? current : fastest
    );
  }

  // Prefer servers with lower load and better response time.
  private weightedSelection(servers: ServerInstance[], category?: string): ServerInstance {
    const scored = servers.map(server => ({
      server,
      score: this.calculateServerScore(server, category)
    }));
    scored.sort((a, b) => b.score - a.score);
    return scored[0].server;
  }

  // Score in [0, 1]: 40% free capacity, 40% inverse response time,
  // 20% category affinity.
  private calculateServerScore(server: ServerInstance, category?: string): number {
    const loadFactor = 1 - (server.currentConnections / server.maxConnections);
    const responseFactor = 1 / (server.responseTime + 1);
    const categoryBonus = this.getCategoryBonus(server, category);
    return loadFactor * 0.4 + responseFactor * 0.4 + categoryBonus * 0.2;
  }

  // Was referenced but never implemented in the original. ServerInstance
  // carries no category metadata, so the affinity bonus is neutral for now.
  private getCategoryBonus(server: ServerInstance, category?: string): number {
    return 0;
  }

  updateServerMetrics(serverId: string, metrics: Partial<ServerInstance>): void {
    const server = this.servers.get(serverId);
    if (server) {
      Object.assign(server, metrics);
    }
  }
}
```
## Transport Layer Optimization
### High-Performance Transport
```typescript
// src/core/mcp/optimized-transport.ts
// Batching + optional compression layer over the underlying MCP transport.
// NOTE(review): `this.transport` is referenced in sendImmediate but never
// declared or injected in this snippet, and gzipSync is not imported here —
// both must be supplied by the surrounding module. Confirm before use.
export class OptimizedTransport {
  private compression: boolean = true;
  private batching: boolean = true;
  private batchBuffer: MCPMessage[] = [];
  private batchTimeout: NodeJS.Timeout | null = null;

  constructor(private config: TransportConfig) {}

  // Batches eligible low-priority messages; everything else goes out at once.
  async send(message: MCPMessage): Promise<void> {
    if (this.batching && this.canBatch(message)) {
      this.addToBatch(message);
      return;
    }
    await this.sendImmediate(message);
  }

  // Sends one message (or one batch envelope) right away, optionally gzipped,
  // and records the send latency.
  private async sendImmediate(message: MCPMessage): Promise<void> {
    const start = performance.now();
    // Compress if enabled
    const payload = this.compression
      ? await this.compress(message)
      : message;
    // Send through transport
    await this.transport.send(payload);
    // Record metrics
    this.recordLatency(performance.now() - start);
  }

  // Buffers a message; flushes when the batch fills or the timer expires.
  private addToBatch(message: MCPMessage): void {
    this.batchBuffer.push(message);
    // Start batch timeout if not already running
    if (!this.batchTimeout) {
      this.batchTimeout = setTimeout(
        () => this.flushBatch(),
        this.config.batchTimeoutMs || 10
      );
    }
    // Flush if batch is full
    if (this.batchBuffer.length >= this.config.maxBatchSize) {
      this.flushBatch();
    }
  }

  // Drains the buffer into a single 'batch' envelope.
  // NOTE(review): on an early (size-triggered) flush the pending timeout is
  // nulled but never cleared with clearTimeout — harmless because the late
  // callback sees an empty buffer, but worth confirming that is intended.
  private async flushBatch(): Promise<void> {
    if (this.batchBuffer.length === 0) return;
    const batch = this.batchBuffer.splice(0);
    this.batchTimeout = null;
    // Send as single batched message
    await this.sendImmediate({
      type: 'batch',
      messages: batch
    });
  }

  // Don't batch urgent messages or responses — they must not be delayed.
  private canBatch(message: MCPMessage): boolean {
    return message.type !== 'response' &&
      message.priority !== 'high' &&
      message.type !== 'error';
  }

  // Use fast compression for smaller messages.
  private async compress(data: any): Promise<Buffer> {
    return gzipSync(JSON.stringify(data));
  }
}
```
## Performance Monitoring
### Real-time MCP Metrics
```typescript
// src/core/mcp/metrics.ts
// Aggregate counters and derived latencies exposed by the metrics collector.
interface MCPMetrics {
  requestCount: number;        // total requests observed
  errorCount: number;          // failed requests
  avgResponseTime: number;     // ms, over the recent-response window
  p95ResponseTime: number;     // ms, 95th percentile of the same window
  connectionPoolHits: number;
  connectionPoolMisses: number;
  toolLookupTime: number;      // ms, moving average
  startupTime: number;         // ms, recorded once at server start
}
// Collects request/pool/tool metrics and derives a coarse health status.
export class MCPMetricsCollector {
  private metrics: MCPMetrics;
  // Bounded window of recent response times used for avg/p95.
  private responseTimeBuffer: number[] = [];
  private readonly bufferSize = 1000;

  constructor() {
    this.metrics = this.createInitialMetrics();
  }

  recordRequest(latencyMs: number): void {
    this.metrics.requestCount++;
    this.updateResponseTimes(latencyMs);
  }

  recordError(): void {
    this.metrics.errorCount++;
  }

  recordConnectionPoolHit(): void {
    this.metrics.connectionPoolHits++;
  }

  recordConnectionPoolMiss(): void {
    this.metrics.connectionPoolMisses++;
  }

  recordToolLookup(latencyMs: number): void {
    this.metrics.toolLookupTime = this.updateMovingAverage(
      this.metrics.toolLookupTime,
      latencyMs
    );
  }

  recordStartup(latencyMs: number): void {
    this.metrics.startupTime = latencyMs;
  }

  // Snapshot copy so callers cannot mutate internal state.
  getMetrics(): MCPMetrics {
    return { ...this.metrics };
  }

  // Health from error rate and pool hit rate. Rates are well-defined before
  // any traffic (the original divided by zero and produced NaN, which slipped
  // past every threshold comparison).
  getHealthStatus(): HealthStatus {
    const errorRate = this.metrics.requestCount > 0
      ? this.metrics.errorCount / this.metrics.requestCount
      : 0;
    const poolTotal = this.metrics.connectionPoolHits + this.metrics.connectionPoolMisses;
    // No pool traffic yet is treated as a perfect hit rate, not as unhealthy.
    const poolHitRate = poolTotal > 0
      ? this.metrics.connectionPoolHits / poolTotal
      : 1;
    return {
      status: this.determineHealthStatus(errorRate, poolHitRate),
      errorRate,
      poolHitRate,
      avgResponseTime: this.metrics.avgResponseTime,
      p95ResponseTime: this.metrics.p95ResponseTime
    };
  }

  // Was referenced but missing in the original: all counters start at zero.
  private createInitialMetrics(): MCPMetrics {
    return {
      requestCount: 0,
      errorCount: 0,
      avgResponseTime: 0,
      p95ResponseTime: 0,
      connectionPoolHits: 0,
      connectionPoolMisses: 0,
      toolLookupTime: 0,
      startupTime: 0
    };
  }

  // Was referenced but missing in the original. Simple smoothed average;
  // seeds with the first sample instead of averaging it against 0.
  private updateMovingAverage(current: number, sample: number): number {
    return current === 0 ? sample : (current + sample) / 2;
  }

  private updateResponseTimes(latency: number): void {
    this.responseTimeBuffer.push(latency);
    if (this.responseTimeBuffer.length > this.bufferSize) {
      this.responseTimeBuffer.shift();
    }
    this.metrics.avgResponseTime = this.calculateAverage(this.responseTimeBuffer);
    this.metrics.p95ResponseTime = this.calculatePercentile(this.responseTimeBuffer, 95);
  }

  // Was referenced but missing in the original.
  private calculateAverage(arr: number[]): number {
    if (arr.length === 0) return 0;
    return arr.reduce((sum, v) => sum + v, 0) / arr.length;
  }

  private calculatePercentile(arr: number[], percentile: number): number {
    const sorted = arr.slice().sort((a, b) => a - b);
    const index = Math.ceil((percentile / 100) * sorted.length) - 1;
    return sorted[index] || 0;
  }

  private determineHealthStatus(errorRate: number, poolHitRate: number): 'healthy' | 'warning' | 'critical' {
    if (errorRate > 0.1 || poolHitRate < 0.5) return 'critical';
    if (errorRate > 0.05 || poolHitRate < 0.7) return 'warning';
    return 'healthy';
  }
}
```
## Tool Registry Optimization
### Pre-compiled Tool Index
```typescript
// src/core/mcp/tool-precompiler.ts
// Builds precompiled lookup structures (exact, category, fuzzy) for the whole
// tool catalog so runtime lookups never scan.
export class ToolPrecompiler {
  async precompileTools(): Promise<CompiledToolRegistry> {
    const tools = await this.loadAllTools();
    // Create optimized lookup structures
    const nameIndex = new Map<string, Tool>();
    const categoryIndex = new Map<string, Tool[]>();
    const fuzzyIndex = new Map<string, string[]>();
    for (const tool of tools) {
      // Exact name index
      nameIndex.set(tool.name, tool);
      // Category index
      const category = tool.metadata.category || 'general';
      if (!categoryIndex.has(category)) {
        categoryIndex.set(category, []);
      }
      categoryIndex.get(category)!.push(tool);
      // Pre-compute fuzzy variations (de-duplicated per tool, see below)
      for (const variation of this.generateFuzzyVariations(tool.name)) {
        if (!fuzzyIndex.has(variation)) {
          fuzzyIndex.set(variation, []);
        }
        const bucket = fuzzyIndex.get(variation)!;
        // Guard against double-insertion when two tools' variants collide.
        if (!bucket.includes(tool.name)) {
          bucket.push(tool.name);
        }
      }
    }
    return {
      nameIndex,
      categoryIndex,
      fuzzyIndex,
      totalTools: tools.length,
      compiledAt: new Date()
    };
  }

  // Lowercase / separator-stripped / consonant-only variants. A Set removes
  // duplicates — the original pushed e.g. an already-lowercase name twice,
  // inflating the fuzzy index.
  private generateFuzzyVariations(name: string): string[] {
    const variations = new Set<string>();
    // Common typos and abbreviations
    variations.add(name.toLowerCase());
    variations.add(name.replace(/[-_]/g, ''));
    variations.add(name.replace(/[aeiou]/gi, '')); // Consonants only
    // Add more fuzzy matching logic as needed
    return Array.from(variations);
  }
}
```
## Advanced Caching Strategy
### Multi-Level Caching
```typescript
// src/core/mcp/multi-level-cache.ts
// Three-tier cache: L1 in-process Map (fastest), L2 LRU, L3 disk. Reads
// promote hits upward; writes populate every tier (disk only when
// options.persistent is set).
export class MultiLevelCache {
  private l1Cache: Map<string, any> = new Map(); // In-memory, fastest
  private l2Cache: LRUCache<string, any>;        // LRU cache, larger capacity
  private l3Cache: DiskCache;                    // Persistent disk cache

  constructor(config: CacheConfig) {
    this.l2Cache = new LRUCache<string, any>({
      max: config.l2MaxEntries || 10000,
      ttl: config.l2TTL || 300000 // 5 minutes
    });
    this.l3Cache = new DiskCache(config.l3Path || './.cache/mcp');
  }

  async get(key: string): Promise<any | null> {
    // Try L1 first. has() + get() keeps stored falsy values (0, '', false)
    // working — the original's truthiness checks silently dropped them.
    if (this.l1Cache.has(key)) {
      return this.l1Cache.get(key);
    }
    // Try L2 cache
    const l2Value = this.l2Cache.get(key);
    if (l2Value !== undefined) {
      // Promote to L1
      this.l1Cache.set(key, l2Value);
      return l2Value;
    }
    // Try L3 cache (disk)
    const l3Value = await this.l3Cache.get(key);
    if (l3Value !== undefined && l3Value !== null) {
      // Promote to L2 and L1
      this.l2Cache.set(key, l3Value);
      this.l1Cache.set(key, l3Value);
      return l3Value;
    }
    return null;
  }

  async set(key: string, value: any, options?: CacheOptions): Promise<void> {
    // Set in all levels
    this.l1Cache.set(key, value);
    this.l2Cache.set(key, value);
    if (options?.persistent) {
      await this.l3Cache.set(key, value);
    }
    // Bound L1 by evicting the oldest insertion. Guard the key — the original
    // could pass undefined to delete() under strict TypeScript.
    if (this.l1Cache.size > 1000) {
      const firstKey = this.l1Cache.keys().next().value;
      if (firstKey !== undefined) {
        this.l1Cache.delete(firstKey);
      }
    }
  }
}
```
## Success Metrics
### Performance Targets
- [ ] **Startup Time**: <400ms MCP server initialization (4.5x improvement)
- [ ] **Response Time**: <100ms p95 for tool execution
- [ ] **Tool Lookup**: <5ms average lookup time
- [ ] **Connection Pool**: >90% hit rate
- [ ] **Memory Usage**: 50% reduction in idle memory
- [ ] **Error Rate**: <1% failed requests
- [ ] **Throughput**: >1000 requests/second
### Monitoring Dashboards
```typescript
// Reference monitoring configuration: dashboard panels plus the alert rules
// that fire on sustained degradation.
const mcpDashboard = {
  metrics: [
    'Request latency (p50, p95, p99)',
    'Error rate by tool category',
    'Connection pool utilization',
    'Tool lookup performance',
    'Memory usage trends',
    'Cache hit rates (L1, L2, L3)'
  ],
  alerts: [
    'Response time >200ms for 5 minutes',
    'Error rate >5% for 1 minute',
    'Pool hit rate <70% for 10 minutes',
    'Memory usage >500MB for 5 minutes'
  ]
};
## Related V3 Skills
- `v3-core-implementation` - Core domain integration with MCP
- `v3-performance-optimization` - Overall performance optimization
- `v3-swarm-coordination` - MCP integration with swarm coordination
- `v3-memory-unification` - Memory sharing via MCP tools
## Usage Examples
### Complete MCP Optimization
```bash
# Full MCP server optimization
Task("MCP optimization implementation",
"Implement all MCP performance optimizations with monitoring",
"mcp-specialist")
```
### Specific Optimization
```bash
# Connection pool optimization
Task("MCP connection pooling",
"Implement advanced connection pooling with health monitoring",
"mcp-specialist")
```

View File

@ -0,0 +1,174 @@
---
name: "V3 Memory Unification"
description: "Unify 6+ memory systems into AgentDB with HNSW indexing for 150x-12,500x search improvements. Implements ADR-006 (Unified Memory Service) and ADR-009 (Hybrid Memory Backend)."
---
# V3 Memory Unification
## What This Skill Does
Consolidates disparate memory systems into unified AgentDB backend with HNSW vector search, achieving 150x-12,500x search performance improvements while maintaining backward compatibility.
## Quick Start
```bash
# Initialize memory unification
Task("Memory architecture", "Design AgentDB unification strategy", "v3-memory-specialist")
# AgentDB integration
Task("AgentDB setup", "Configure HNSW indexing and vector search", "v3-memory-specialist")
# Data migration
Task("Memory migration", "Migrate SQLite/Markdown to AgentDB", "v3-memory-specialist")
```
## Systems to Unify
### Legacy Systems → AgentDB
```
┌─────────────────────────────────────────┐
│ • MemoryManager (basic operations) │
│ • DistributedMemorySystem (clustering) │
│ • SwarmMemory (agent-specific) │
│ • AdvancedMemoryManager (features) │
│ • SQLiteBackend (structured) │
│ • MarkdownBackend (file-based) │
│ • HybridBackend (combination) │
└─────────────────────────────────────────┘
┌─────────────────────────────────────────┐
│ 🚀 AgentDB with HNSW │
│ • 150x-12,500x faster search │
│ • Unified query interface │
│ • Cross-agent memory sharing │
│ • SONA learning integration │
└─────────────────────────────────────────┘
```
## Implementation Architecture
### Unified Memory Service
```typescript
// Single façade over the legacy memory backends (ADR-006). Writes go to
// AgentDB and are vector-indexed; semantic queries take the HNSW path,
// everything else falls through to AgentDB's structured query path.
class UnifiedMemoryService implements IMemoryBackend {
  constructor(
    private agentdb: AgentDBAdapter,
    private indexer: HNSWIndexer,
    private migrator: DataMigrator // NOTE(review): unused in this sketch — presumably wired to migration entry points
  ) {}

  // Persist the entry, then index it for semantic search.
  async store(entry: MemoryEntry): Promise<void> {
    await this.agentdb.store(entry);
    await this.indexer.index(entry);
  }

  // Semantic queries use HNSW (the fast path); others use AgentDB directly.
  async query(query: MemoryQuery): Promise<MemoryEntry[]> {
    if (query.semantic) {
      return this.indexer.search(query); // 150x-12,500x faster
    }
    return this.agentdb.query(query);
  }
}
```
### HNSW Vector Search
```typescript
// Thin wrapper over an HNSW approximate-nearest-neighbor index.
class HNSWIndexer {
  // Declared explicitly — the original assigned this.index without ever
  // declaring the field, which fails under strict TypeScript class semantics.
  private index: HNSWIndex;

  constructor(dimensions: number = 1536) {
    this.index = new HNSWIndex({
      dimensions,
      efConstruction: 200, // build-time quality/speed trade-off
      M: 16,               // graph out-degree
      speedupTarget: '150x-12500x'
    });
  }

  // Embeds the query text, finds nearest neighbors, then hydrates entries.
  async search(query: MemoryQuery): Promise<MemoryEntry[]> {
    const embedding = await this.embedContent(query.content);
    const results = this.index.search(embedding, query.limit || 10);
    return this.retrieveEntries(results);
  }
}
```
## Migration Strategy
### Phase 1: Foundation
```typescript
// AgentDB adapter setup — dimensions must match the embedding model's output width.
const agentdb = new AgentDBAdapter({
  dimensions: 1536,
  indexType: 'HNSW',
  speedupTarget: '150x-12500x'
});
```
### Phase 2: Data Migration
```typescript
// SQLite → AgentDB: embed each stored row, then write it back with its vector.
async function migrateFromSQLite(): Promise<void> {
  const rows = await sqlite.getAll();
  for (const row of rows) {
    const vector = await generateEmbedding(row.content);
    await agentdb.store({ ...row, embedding: vector });
  }
}

// Markdown → AgentDB: one entry per markdown file, embedding its full text
// and remembering the source path in metadata.
async function migrateFromMarkdown(): Promise<void> {
  const paths = await glob('**/*.md');
  for (const sourcePath of paths) {
    const text = await fs.readFile(sourcePath, 'utf-8');
    const record = {
      id: generateId(),
      content: text,
      embedding: await generateEmbedding(text),
      metadata: { originalFile: sourcePath }
    };
    await agentdb.store(record);
  }
}
```
## SONA Integration
### Learning Pattern Storage
```typescript
// Bridges SONA learning patterns into the unified memory service.
class SONAMemoryIntegration {
  // Persists a learning pattern with its embedding and SONA metadata.
  async storePattern(pattern: LearningPattern): Promise<void> {
    const embedding = await this.generateEmbedding(pattern.data);
    const entry = {
      id: pattern.id,
      content: pattern.data,
      metadata: {
        sonaMode: pattern.mode,
        reward: pattern.reward,
        adaptationTime: pattern.adaptationTime
      },
      embedding
    };
    await this.memory.store(entry);
  }

  // Semantic search restricted to stored learning patterns.
  async retrieveSimilarPatterns(query: string): Promise<LearningPattern[]> {
    const request = {
      type: 'semantic',
      content: query,
      filters: { type: 'learning_pattern' }
    };
    return this.memory.query(request);
  }
}
```
## Performance Targets
- **Search Speed**: 150x-12,500x improvement via HNSW
- **Memory Usage**: 50-75% reduction through optimization
- **Query Latency**: <100ms for 1M+ entries
- **Cross-Agent Sharing**: Real-time memory synchronization
- **SONA Integration**: <0.05ms adaptation time
## Success Metrics
- [ ] All 7 legacy memory systems migrated to AgentDB
- [ ] 150x-12,500x search performance validated
- [ ] 50-75% memory usage reduction achieved
- [ ] Backward compatibility maintained
- [ ] SONA learning patterns integrated
- [ ] Cross-agent memory sharing operational

View File

@ -0,0 +1,390 @@
---
name: "V3 Performance Optimization"
description: "Achieve aggressive v3 performance targets: 2.49x-7.47x Flash Attention speedup, 150x-12,500x search improvements, 50-75% memory reduction. Comprehensive benchmarking and optimization suite."
---
# V3 Performance Optimization
## What This Skill Does
Validates and optimizes Codex-flow v3 to achieve industry-leading performance through Flash Attention, AgentDB HNSW indexing, and comprehensive system optimization with continuous benchmarking.
## Quick Start
```bash
# Initialize performance optimization
Task("Performance baseline", "Establish v2 performance benchmarks", "v3-performance-engineer")
# Target validation (parallel)
Task("Flash Attention", "Validate 2.49x-7.47x speedup target", "v3-performance-engineer")
Task("Search optimization", "Validate 150x-12,500x search improvement", "v3-performance-engineer")
Task("Memory optimization", "Achieve 50-75% memory reduction", "v3-performance-engineer")
```
## Performance Target Matrix
### Flash Attention Revolution
```
┌─────────────────────────────────────────┐
│ FLASH ATTENTION │
├─────────────────────────────────────────┤
│ Baseline: Standard attention │
│ Target: 2.49x - 7.47x speedup │
│ Memory: 50-75% reduction │
│ Latency: Sub-millisecond processing │
└─────────────────────────────────────────┘
```
### Search Performance Revolution
```
┌─────────────────────────────────────────┐
│ SEARCH OPTIMIZATION │
├─────────────────────────────────────────┤
│ Current: O(n) linear search │
│ Target: 150x - 12,500x improvement │
│ Method: HNSW indexing │
│ Latency: <100ms for 1M+ entries │
└─────────────────────────────────────────┘
```
## Comprehensive Benchmark Suite
### Startup Performance
```typescript
// Measures end-to-end cold-start latency against the <500ms target.
class StartupBenchmarks {
  async benchmarkColdStart(): Promise<BenchmarkResult> {
    const targetMs = 500;
    const begin = performance.now();
    await this.initializeCLI();
    await this.initializeMCPServer();
    await this.spawnTestAgent();
    const elapsed = performance.now() - begin;
    return {
      total: elapsed,
      target: targetMs, // ms
      achieved: elapsed < targetMs
    };
  }
}
```
### Memory Operation Benchmarks
```typescript
// Benchmarks vector-search speedup (HNSW vs linear) and heap reduction.
class MemoryBenchmarks {
  // Compares linear search against HNSW on the same 10k-query workload.
  async benchmarkVectorSearch(): Promise<SearchBenchmark> {
    const workload = this.generateTestQueries(10000);
    // Baseline: current linear search
    const linearMs = await this.timeOperation(() =>
      this.currentMemory.searchAll(workload)
    );
    // Candidate: HNSW-backed search
    const hnswMs = await this.timeOperation(() =>
      this.agentDBMemory.hnswSearchAll(workload)
    );
    const speedup = linearMs / hnswMs;
    return {
      baseline: linearMs,
      hnsw: hnswMs,
      improvement: speedup,
      targetRange: [150, 12500],
      achieved: speedup >= 150
    };
  }

  // Measures heap usage before/after optimization against the 50-75% goal.
  async benchmarkMemoryUsage(): Promise<MemoryBenchmark> {
    const heapBefore = process.memoryUsage().heapUsed;
    await this.loadTestDataset();
    const heapLoaded = process.memoryUsage().heapUsed;
    await this.enableOptimization();
    const heapOptimized = process.memoryUsage().heapUsed;
    const reductionRatio = (heapLoaded - heapOptimized) / heapLoaded;
    return {
      baseline: heapBefore,
      withData: heapLoaded,
      optimized: heapOptimized,
      reductionPercent: reductionRatio * 100,
      targetReduction: [50, 75],
      achieved: reductionRatio >= 0.5
    };
  }
}
```
### Swarm Coordination Benchmarks
```typescript
// Exercises 15-agent coordination and times its key phases.
class SwarmBenchmarks {
  async benchmark15AgentCoordination(): Promise<SwarmBenchmark> {
    const swarm = await this.spawn15Agents();
    // Coordination latency
    const coordinationMs = await this.timeOperation(() =>
      this.coordinateSwarmTask(swarm)
    );
    // Task decomposition
    const decompositionMs = await this.timeOperation(() =>
      this.decomposeComplexTask()
    );
    // Consensus achievement
    const consensusMs = await this.timeOperation(() =>
      this.achieveSwarmConsensus(swarm)
    );
    return {
      coordination: coordinationMs,
      decomposition: decompositionMs,
      consensus: consensusMs,
      agentCount: 15,
      efficiency: this.calculateEfficiency(swarm)
    };
  }
}
```
### Flash Attention Benchmarks
```typescript
class AttentionBenchmarks {
async benchmarkFlashAttention(): Promise<AttentionBenchmark> {
const sequences = this.generateSequences([512, 1024, 2048, 4096]);
const results = [];
for (const sequence of sequences) {
// Baseline attention
const baselineResult = await this.benchmarkStandardAttention(sequence);
// Flash attention
const flashResult = await this.benchmarkFlashAttention(sequence);
results.push({
sequenceLength: sequence.length,
speedup: baselineResult.time / flashResult.time,
memoryReduction: (baselineResult.memory - flashResult.memory) / baselineResult.memory,
targetSpeedup: [2.49, 7.47],
achieved: this.checkTarget(flashResult, [2.49, 7.47])
});
}
return {
results,
averageSpeedup: this.calculateAverage(results, 'speedup'),
averageMemoryReduction: this.calculateAverage(results, 'memoryReduction')
};
}
}
```
### SONA Learning Benchmarks
```typescript
// Verifies SONA adaptation latency stays under the 0.05ms target for each
// representative scenario.
class SONABenchmarks {
  async benchmarkAdaptationTime(): Promise<SONABenchmark> {
    const TARGET_MS = 0.05;
    const scenarios = [
      'pattern_recognition',
      'task_optimization',
      'error_correction',
      'performance_tuning'
    ];
    const results = [];
    for (const scenario of scenarios) {
      const t0 = performance.hrtime.bigint();
      await this.sona.adapt(scenario);
      const t1 = performance.hrtime.bigint();
      // hrtime is nanoseconds; convert to milliseconds.
      const elapsedMs = Number(t1 - t0) / 1e6;
      results.push({
        scenario,
        adaptationTime: elapsedMs,
        target: TARGET_MS, // ms
        achieved: elapsedMs <= TARGET_MS
      });
    }
    const totalMs = results.reduce((sum, r) => sum + r.adaptationTime, 0);
    return {
      scenarios: results,
      averageTime: totalMs / results.length,
      successRate: results.filter(r => r.achieved).length / results.length
    };
  }
}
```
## Performance Monitoring Dashboard
### Real-time Metrics
```typescript
// Aggregates live performance metrics and produces periodic reports.
class PerformanceMonitor {
  // One point-in-time measurement of every tracked subsystem.
  async collectMetrics(): Promise<PerformanceSnapshot> {
    const snapshot: PerformanceSnapshot = {
      timestamp: Date.now(),
      flashAttention: await this.measureFlashAttention(),
      searchPerformance: await this.measureSearchSpeed(),
      memoryUsage: await this.measureMemoryEfficiency(),
      startupTime: await this.measureStartupLatency(),
      sonaAdaptation: await this.measureSONASpeed(),
      swarmCoordination: await this.measureSwarmEfficiency()
    };
    return snapshot;
  }

  // Builds a full report from a fresh snapshot.
  async generateReport(): Promise<PerformanceReport> {
    const snapshot = await this.collectMetrics();
    const report: PerformanceReport = {
      summary: this.generateSummary(snapshot),
      achievements: this.checkTargetAchievements(snapshot),
      trends: this.analyzeTrends(),
      recommendations: this.generateOptimizations(),
      regressions: await this.detectRegressions()
    };
    return report;
  }
}
```
### Continuous Regression Detection
```typescript
class PerformanceRegression {
  // Compares a fresh benchmark run against the stored baseline and reports
  // every metric that dropped more than 5%.
  // NOTE(review): `change < -0.05` treats LOWER values as regressions, i.e.
  // every metric is assumed higher-is-better. For latency-style metrics
  // (lower is better) this is inverted — confirm the baseline schema.
  async detectRegressions(): Promise<RegressionReport> {
    const current = await this.runFullBenchmark();
    const baseline = await this.getBaseline();
    const regressions = [];
    for (const [metric, currentValue] of Object.entries(current)) {
      const baselineValue = baseline[metric];
      // Relative change vs baseline; negative means the metric decreased.
      const change = (currentValue - baselineValue) / baselineValue;
      if (change < -0.05) { // 5% regression threshold
        regressions.push({
          metric,
          baseline: baselineValue,
          current: currentValue,
          regressionPercent: change * 100,
          severity: this.classifyRegression(change)
        });
      }
    }
    return {
      hasRegressions: regressions.length > 0,
      regressions,
      recommendations: this.generateRegressionFixes(regressions)
    };
  }
}
```
## Optimization Strategies
### Memory Optimization
```typescript
// Applies the memory-reduction tactics in sequence, then validates the gain.
class MemoryOptimization {
  async optimizeMemoryUsage(): Promise<OptimizationResult> {
    // Each step is awaited in order; later steps assume earlier ones ran.
    await this.setupMemoryPools();          // memory pooling
    await this.optimizeGarbageCollection(); // GC tuning
    await this.setupObjectPools();          // object reuse patterns
    await this.enableMemoryCompression();   // memory compression
    return this.validateMemoryReduction();
  }
}
```
### CPU Optimization
```typescript
// Applies CPU-side optimizations in order, then validates the improvement.
class CPUOptimization {
  async optimizeCPUUsage(): Promise<OptimizationResult> {
    await this.setupWorkerThreads();     // worker thread pools
    await this.enableSIMDInstructions(); // CPU-specific optimizations
    await this.optimizeTaskBatching();   // task batching
    return this.validateCPUImprovement();
  }
}
```
## Target Validation Framework
### Performance Gates
```typescript
// Runs every target validation in parallel and rolls results into one report.
class PerformanceGates {
  async validateAllTargets(): Promise<ValidationReport> {
    const checks = [
      this.validateFlashAttention(),    // 2.49x-7.47x
      this.validateSearchPerformance(), // 150x-12,500x
      this.validateMemoryReduction(),   // 50-75%
      this.validateStartupTime(),       // <500ms
      this.validateSONAAdaptation()     // <0.05ms
    ];
    const results = await Promise.all(checks);
    const allTargetsAchieved = results.every(r => r.achieved);
    return {
      allTargetsAchieved,
      results,
      overallScore: this.calculateOverallScore(results),
      recommendations: this.generateRecommendations(results)
    };
  }
}
```
## Success Metrics
### Primary Targets
- [ ] **Flash Attention**: 2.49x-7.47x speedup validated
- [ ] **Search Performance**: 150x-12,500x improvement confirmed
- [ ] **Memory Reduction**: 50-75% usage optimization achieved
- [ ] **Startup Time**: <500ms cold start consistently
- [ ] **SONA Adaptation**: <0.05ms learning response time
- [ ] **15-Agent Coordination**: Efficient parallel execution
### Continuous Monitoring
- [ ] **Performance Dashboard**: Real-time metrics collection
- [ ] **Regression Testing**: Automated performance validation
- [ ] **Trend Analysis**: Performance evolution tracking
- [ ] **Alert System**: Immediate regression notification
## Related V3 Skills
- `v3-integration-deep` - Performance integration with agentic-flow
- `v3-memory-unification` - Memory performance optimization
- `v3-swarm-coordination` - Swarm performance coordination
- `v3-security-overhaul` - Secure performance patterns
## Usage Examples
### Complete Performance Validation
```bash
# Full performance suite
npm run benchmark:v3
# Specific target validation
npm run benchmark:flash-attention
npm run benchmark:agentdb-search
npm run benchmark:memory-optimization
# Continuous monitoring
npm run monitor:performance
```

View File

@ -0,0 +1,82 @@
---
name: "V3 Security Overhaul"
description: "Complete security architecture overhaul for Codex-flow v3. Addresses critical CVEs (CVE-1, CVE-2, CVE-3) and implements secure-by-default patterns. Use for security-first v3 implementation."
---
# V3 Security Overhaul
## What This Skill Does
Orchestrates comprehensive security overhaul for Codex-flow v3, addressing critical vulnerabilities and establishing security-first development practices using specialized v3 security agents.
## Quick Start
```bash
# Initialize V3 security domain (parallel)
Task("Security architecture", "Design v3 threat model and security boundaries", "v3-security-architect")
Task("CVE remediation", "Fix CVE-1, CVE-2, CVE-3 critical vulnerabilities", "security-auditor")
Task("Security testing", "Implement TDD London School security framework", "test-architect")
```
## Critical Security Fixes
### CVE-1: Vulnerable Dependencies
```bash
npm install @anthropic-ai/Codex@^2.0.31
npm audit --audit-level high
```
### CVE-2: Weak Password Hashing
```typescript
// ❌ Old: SHA-256 with hardcoded salt
const hash = crypto.createHash('sha256').update(password + salt).digest('hex');
// ✅ New: bcrypt with 12 rounds
import bcrypt from 'bcrypt';
const hash = await bcrypt.hash(password, 12);
```
### CVE-3: Hardcoded Credentials
```typescript
// ✅ Generate secure random credentials
const apiKey = crypto.randomBytes(32).toString('hex');
```
## Security Patterns
### Input Validation (Zod)
```typescript
import { z } from 'zod';
const TaskSchema = z.object({
taskId: z.string().uuid(),
content: z.string().max(10000),
agentType: z.enum(['security', 'core', 'integration'])
});
```
### Path Sanitization
```typescript
function securePath(userPath: string, allowedPrefix: string): string {
const resolved = path.resolve(allowedPrefix, userPath);
if (!resolved.startsWith(path.resolve(allowedPrefix))) {
throw new SecurityError('Path traversal detected');
}
return resolved;
}
```
### Safe Command Execution
```typescript
import { execFile } from 'child_process';
// ✅ Safe: No shell interpretation
const { stdout } = await execFile('git', [userInput], { shell: false });
```
## Success Metrics
- **Security Score**: 90/100 (npm audit + custom scans)
- **CVE Resolution**: 100% of critical vulnerabilities fixed
- **Test Coverage**: >95% security-critical code
- **Implementation**: All secure patterns documented and tested

View File

@ -0,0 +1,340 @@
---
name: "V3 Swarm Coordination"
description: "15-agent hierarchical mesh coordination for v3 implementation. Orchestrates parallel execution across security, core, and integration domains following 10 ADRs with 14-week timeline."
---
# V3 Swarm Coordination
## What This Skill Does
Orchestrates the complete 15-agent hierarchical mesh swarm for Codex-flow v3 implementation, coordinating parallel execution across domains while maintaining dependencies and timeline adherence.
## Quick Start
```bash
# Initialize 15-agent v3 swarm
Task("Swarm initialization", "Initialize hierarchical mesh for v3 implementation", "v3-queen-coordinator")
# Security domain (Phase 1 - Critical priority)
Task("Security architecture", "Design v3 threat model and security boundaries", "v3-security-architect")
Task("CVE remediation", "Fix CVE-1, CVE-2, CVE-3 vulnerabilities", "security-auditor")
Task("Security testing", "Implement TDD security framework", "test-architect")
# Core domain (Phase 2 - Parallel execution)
Task("Memory unification", "Implement AgentDB 150x improvement", "v3-memory-specialist")
Task("Integration architecture", "Deep agentic-flow@alpha integration", "v3-integration-architect")
Task("Performance validation", "Validate 2.49x-7.47x targets", "v3-performance-engineer")
```
## 15-Agent Swarm Architecture
### Hierarchical Mesh Topology
```
👑 QUEEN COORDINATOR
(Agent #1)
┌────────────────────┼────────────────────┐
│ │ │
🛡️ SECURITY 🧠 CORE 🔗 INTEGRATION
(Agents #2-4) (Agents #5-9) (Agents #10-12)
│ │ │
└────────────────────┼────────────────────┘
┌────────────────────┼────────────────────┐
│ │ │
🧪 QUALITY ⚡ PERFORMANCE 🚀 DEPLOYMENT
(Agent #13) (Agent #14) (Agent #15)
```
### Agent Roster
| ID | Agent | Domain | Phase | Responsibility |
|----|-------|--------|-------|----------------|
| 1 | Queen Coordinator | Orchestration | All | GitHub issues, dependencies, timeline |
| 2 | Security Architect | Security | Foundation | Threat modeling, CVE planning |
| 3 | Security Implementer | Security | Foundation | CVE fixes, secure patterns |
| 4 | Security Tester | Security | Foundation | TDD security testing |
| 5 | Core Architect | Core | Systems | DDD architecture, coordination |
| 6 | Core Implementer | Core | Systems | Core module implementation |
| 7 | Memory Specialist | Core | Systems | AgentDB unification |
| 8 | Swarm Specialist | Core | Systems | Unified coordination engine |
| 9 | MCP Specialist | Core | Systems | MCP server optimization |
| 10 | Integration Architect | Integration | Integration | agentic-flow@alpha deep integration |
| 11 | CLI/Hooks Developer | Integration | Integration | CLI modernization |
| 12 | Neural/Learning Dev | Integration | Integration | SONA integration |
| 13 | TDD Test Engineer | Quality | All | London School TDD |
| 14 | Performance Engineer | Performance | Optimization | Benchmarking validation |
| 15 | Release Engineer | Deployment | Release | CI/CD and v3.0.0 release |
## Implementation Phases
### Phase 1: Foundation (Week 1-2)
**Active Agents**: #1, #2-4, #5-6
```typescript
// Phase 1 (Week 1-2): security remediation and core architecture groundwork.
const phase1 = async () => {
  // Security fixes and the architecture foundation have no mutual
  // dependency, so the entire foundation wave runs concurrently.
  const foundationTasks = [
    // Security domain (critical priority)
    Task("Security architecture", "Complete threat model and security boundaries", "v3-security-architect"),
    Task("CVE-1 fix", "Update vulnerable dependencies", "security-implementer"),
    Task("CVE-2 fix", "Replace weak password hashing", "security-implementer"),
    Task("CVE-3 fix", "Remove hardcoded credentials", "security-implementer"),
    Task("Security testing", "TDD London School security framework", "test-architect"),
    // Core architecture foundation
    Task("DDD architecture", "Design domain boundaries and structure", "core-architect"),
    Task("Type modernization", "Update type system for v3", "core-implementer")
  ];
  await Promise.all(foundationTasks);
};
```
### Phase 2: Core Systems (Week 3-6)
**Active Agents**: #1, #5-9, #13
```typescript
// Phase 2 (Week 3-6): core systems built in parallel on the security foundation.
const phase2 = async () => {
  const coreSystemTasks = [
    Task("Memory unification", "Implement AgentDB with 150x-12,500x improvement", "v3-memory-specialist"),
    Task("Swarm coordination", "Merge 4 coordination systems into unified engine", "swarm-specialist"),
    Task("MCP optimization", "Optimize MCP server performance", "mcp-specialist"),
    Task("Core implementation", "Implement DDD modular architecture", "core-implementer"),
    Task("TDD core tests", "Comprehensive test coverage for core systems", "test-architect")
  ];
  await Promise.all(coreSystemTasks);
};
```
### Phase 3: Integration (Week 7-10)
**Active Agents**: #1, #10-12, #13-14
```typescript
// Phase 3 (Week 7-10): integration, CLI/SONA work, and benchmarking in parallel.
const phase3 = async () => {
  const integrationTasks = [
    Task("agentic-flow integration", "Eliminate 10,000+ duplicate lines", "v3-integration-architect"),
    Task("CLI modernization", "Enhance CLI with hooks system", "cli-hooks-developer"),
    Task("SONA integration", "Implement <0.05ms learning adaptation", "neural-learning-developer"),
    Task("Performance benchmarking", "Validate 2.49x-7.47x targets", "v3-performance-engineer"),
    Task("Integration testing", "End-to-end system validation", "test-architect")
  ];
  await Promise.all(integrationTasks);
};
```
### Phase 4: Release (Week 11-14)
**Active Agents**: All 15
```typescript
// Phase 4 (Week 11-14): release hardening with the full 15-agent swarm active.
const phase4 = async () => {
  const releaseTasks = [
    Task("Performance optimization", "Final optimization pass", "v3-performance-engineer"),
    Task("Release preparation", "CI/CD pipeline and v3.0.0 release", "release-engineer"),
    Task("Final testing", "Complete test coverage validation", "test-architect")
  ];
  // Every agent additionally runs a final polish pass of its own.
  const polishTasks = agents.map(agent =>
    Task("Final polish", `Agent ${agent.id} final optimization`, agent.name)
  );
  await Promise.all([...releaseTasks, ...polishTasks]);
};
```
## Coordination Patterns
### Dependency Management
```typescript
/**
 * Schedules the 15-agent v3 swarm in dependency order: each "wave" runs
 * every agent whose prerequisites have completed, in parallel.
 */
class DependencyCoordination {
  // Agent ID -> IDs of agents that must complete first.
  // BUG FIX: the original map held only agents 2-15 (14 entries) while the
  // scheduling loop waited for 15 completions, so agent 1 could never be
  // scheduled and the loop always ended in a false "Deadlock detected".
  // Agent 1 (Queen Coordinator) is now listed with no prerequisites.
  private dependencies = new Map<number, number[]>([
    [1, []],
    // Security first (no dependencies beyond the queen's kickoff)
    [2, []], [3, [2]], [4, [2, 3]],
    // Core depends on security foundation
    [5, [2]], [6, [5]], [7, [5]], [8, [5, 7]], [9, [5]],
    // Integration depends on core systems
    [10, [5, 7, 8]], [11, [5, 10]], [12, [7, 10]],
    // Quality and performance cross-cutting
    [13, [2, 5]], [14, [5, 7, 8, 10]], [15, [13, 14]]
  ]);

  /**
   * Runs all agents to completion, wave by wave. Throws only on a genuine
   * cycle in the dependency graph (no runnable agent while some remain).
   */
  async coordinateExecution(): Promise<void> {
    const completed = new Set<number>();
    // Derive the target from the map instead of hard-coding 15, so the
    // roster can change without touching this loop.
    while (completed.size < this.dependencies.size) {
      const ready = this.getReadyAgents(completed);
      if (ready.length === 0) {
        throw new Error('Deadlock detected in dependency chain');
      }
      // Execute all ready agents in parallel.
      await Promise.all(ready.map(agentId => this.executeAgent(agentId)));
      ready.forEach(id => completed.add(id));
    }
  }

  /** Agents not yet run whose prerequisites are all complete. */
  private getReadyAgents(completed: Set<number>): number[] {
    const ready: number[] = [];
    for (const [agentId, deps] of this.dependencies) {
      if (!completed.has(agentId) && deps.every(dep => completed.has(dep))) {
        ready.push(agentId);
      }
    }
    return ready;
  }

  // NOTE(review): executeAgent(agentId) dispatches the actual Task(...) call
  // and is provided by the surrounding swarm runtime (not shown here).
}
```
### GitHub Integration
```typescript
// Mirrors swarm state into GitHub: one milestone for the v3 release, epic
// issues per work stream, and periodic progress comments.
// NOTE(review): assumes a `gh` client object and several instance helpers
// (calculate14WeekDeadline, getAgentGithubUser, postAgentProgress, agents)
// defined elsewhere — confirm against the full class.
class GitHubCoordination {
// Creates the single v3.0.0 milestone that all epics attach to.
async initializeV3Milestone(): Promise<void> {
await gh.createMilestone({
title: 'Codex-Flow v3.0.0 Implementation',
description: '15-agent swarm implementation of 10 ADRs',
dueDate: this.calculate14WeekDeadline()
});
}
// Opens one epic issue per major work stream, labeled with the owning
// agent IDs and assigned to those agents' GitHub users.
async createEpicIssues(): Promise<void> {
const epics = [
{ title: 'Security Overhaul (CVE-1,2,3)', agents: [2, 3, 4] },
{ title: 'Memory Unification (AgentDB)', agents: [7] },
{ title: 'agentic-flow Integration', agents: [10] },
{ title: 'Performance Optimization', agents: [14] },
{ title: 'DDD Architecture', agents: [5, 6] }
];
for (const epic of epics) {
await gh.createIssue({
title: epic.title,
// Each owning agent contributes an `agent-N` label.
labels: ['epic', 'v3', ...epic.agents.map(id => `agent-${id}`)],
assignees: epic.agents.map(id => this.getAgentGithubUser(id))
});
}
}
// Posts a progress update for every agent once per hour.
// NOTE(review): the interval handle is discarded, so this timer can never
// be stopped — consider storing it and clearing on shutdown.
async trackProgress(): Promise<void> {
// Hourly progress updates from each agent
setInterval(async () => {
for (const agent of this.agents) {
await this.postAgentProgress(agent);
}
}, 3600000); // 1 hour
}
}
```
### Communication Bus
```typescript
/**
 * Domain-scoped message fan-out for the v3 swarm, backed by a shared
 * QUIC bus sized for all 15 agents.
 */
class SwarmCommunication {
  // One transport shared by every domain broadcast.
  private bus = new QuicSwarmBus({
    maxAgents: 15,
    messageTimeout: 30000,
    retryAttempts: 3
  });

  /** Fans a message out to the security agents (#2-#4) at critical priority. */
  async broadcastToSecurityDomain(message: SwarmMessage): Promise<void> {
    await this.sendToDomain(message, [2, 3, 4], 'critical');
  }

  /** Fans a message out to the core-system agents (#5-#9) at high priority. */
  async coordinateCoreSystems(message: SwarmMessage): Promise<void> {
    await this.sendToDomain(message, [5, 6, 7, 8, 9], 'high');
  }

  /** Fans a message out to the integration agents (#10-#12) at medium priority. */
  async notifyIntegrationTeam(message: SwarmMessage): Promise<void> {
    await this.sendToDomain(message, [10, 11, 12], 'medium');
  }

  // Single broadcast path so every domain uses identical bus semantics.
  private async sendToDomain(
    message: SwarmMessage,
    targetAgents: number[],
    priority: string
  ): Promise<void> {
    await this.bus.broadcast(message, { targetAgents, priority });
  }
}
```
## Performance Coordination
### Parallel Efficiency Monitoring
```typescript
/**
 * Measures how efficiently the swarm's agents execute in parallel against
 * the >85% utilization target.
 */
class EfficiencyMonitor {
  /**
   * Builds an efficiency report from current agent utilization: the averaged
   * utilization, whether it clears the target, plus detected bottlenecks and
   * suggested optimizations.
   */
  async measureParallelEfficiency(): Promise<EfficiencyReport> {
    const agentUtilization = await this.measureAgentUtilization();
    // NOTE(review): the original also awaited measureCoordinationCost() into
    // an unused local; removed until the report actually carries that figure.
    const UTILIZATION_TARGET = 0.85; // >85% agent utilization goal
    return {
      totalEfficiency: agentUtilization.average,
      target: UTILIZATION_TARGET,
      achieved: agentUtilization.average > UTILIZATION_TARGET,
      bottlenecks: this.identifyBottlenecks(agentUtilization),
      recommendations: this.generateOptimizations()
    };
  }
}
```
### Load Balancing
```typescript
/**
 * Keeps agent workloads under capacity by moving tasks off overloaded
 * agents onto available peers.
 */
class SwarmLoadBalancer {
  /** Scans every agent's load and offloads any agent over its threshold. */
  async balanceWorkload(): Promise<void> {
    const workloads = await this.analyzeAgentWorkloads();
    for (const [id, currentLoad] of workloads.entries()) {
      if (currentLoad <= this.getCapacityThreshold(id)) {
        continue; // within capacity — nothing to do for this agent
      }
      await this.redistributeWork(id);
    }
  }

  /** Reassigns each of the overloaded agent's tasks to the best available peer. */
  async redistributeWork(overloadedAgent: number): Promise<void> {
    const candidates = this.getAvailableAgents();
    const pending = await this.getAgentTasks(overloadedAgent);
    for (const task of pending) {
      const target = this.selectOptimalAgent(task, candidates);
      await this.reassignTask(task, target);
    }
  }
}
```
## Success Metrics
### Swarm Coordination
- [ ] **Parallel Efficiency**: >85% agent utilization time
- [ ] **Dependency Resolution**: Zero deadlocks or blocking issues
- [ ] **Communication Latency**: <100ms inter-agent messaging
- [ ] **Timeline Adherence**: 14-week delivery maintained
- [ ] **GitHub Integration**: <4h automated issue response
### Implementation Targets
- [ ] **ADR Coverage**: All 10 ADRs implemented successfully
- [ ] **Performance**: 2.49x-7.47x Flash Attention achieved
- [ ] **Search**: 150x-12,500x AgentDB improvement validated
- [ ] **Code Reduction**: <5,000 lines (vs 15,000+)
- [ ] **Security**: 90/100 security score achieved
## Related V3 Skills
- `v3-security-overhaul` - Security domain coordination
- `v3-memory-unification` - Memory system coordination
- `v3-integration-deep` - Integration domain coordination
- `v3-performance-optimization` - Performance domain coordination
## Usage Examples
### Initialize Complete V3 Swarm
```bash
# Queen Coordinator initializes full swarm
Task("V3 swarm initialization",
"Initialize 15-agent hierarchical mesh for complete v3 implementation",
"v3-queen-coordinator")
```
### Phase-based Execution
```bash
# Phase 1: Security-first foundation
npm run v3:phase1:security
# Phase 2: Core systems parallel
npm run v3:phase2:core-systems
# Phase 3: Integration and optimization
npm run v3:phase3:integration
# Phase 4: Release preparation
npm run v3:phase4:release
```

View File

@ -0,0 +1,649 @@
---
name: "Verification & Quality Assurance"
description: "Comprehensive truth scoring, code quality verification, and automatic rollback system with 0.95 accuracy threshold for ensuring high-quality agent outputs and codebase reliability."
version: "2.0.0"
category: "quality-assurance"
tags: ["verification", "truth-scoring", "quality", "rollback", "metrics", "ci-cd"]
---
# Verification & Quality Assurance Skill
## What This Skill Does
This skill provides a comprehensive verification and quality assurance system that ensures code quality and correctness through:
- **Truth Scoring**: Real-time reliability metrics (0.0-1.0 scale) for code, agents, and tasks
- **Verification Checks**: Automated code correctness, security, and best practices validation
- **Automatic Rollback**: Instant reversion of changes that fail verification (default threshold: 0.95)
- **Quality Metrics**: Statistical analysis with trends, confidence intervals, and improvement tracking
- **CI/CD Integration**: Export capabilities for continuous integration pipelines
- **Real-time Monitoring**: Live dashboards and watch modes for ongoing verification
## Prerequisites
- Codex Flow installed (`npx Codex-flow@alpha`)
- Git repository (for rollback features)
- Node.js 18+ (for dashboard features)
## Quick Start
```bash
# View current truth scores
npx Codex-flow@alpha truth
# Run verification check
npx Codex-flow@alpha verify check
# Verify specific file with custom threshold
npx Codex-flow@alpha verify check --file src/app.js --threshold 0.98
# Rollback last failed verification
npx Codex-flow@alpha verify rollback --last-good
```
---
## Complete Guide
### Truth Scoring System
#### View Truth Metrics
Display comprehensive quality and reliability metrics for your codebase and agent tasks.
**Basic Usage:**
```bash
# View current truth scores (default: table format)
npx Codex-flow@alpha truth
# View scores for specific time period
npx Codex-flow@alpha truth --period 7d
# View scores for specific agent
npx Codex-flow@alpha truth --agent coder --period 24h
# Find files/tasks below threshold
npx Codex-flow@alpha truth --threshold 0.8
```
**Output Formats:**
```bash
# Table format (default)
npx Codex-flow@alpha truth --format table
# JSON for programmatic access
npx Codex-flow@alpha truth --format json
# CSV for spreadsheet analysis
npx Codex-flow@alpha truth --format csv
# HTML report with visualizations
npx Codex-flow@alpha truth --format html --export report.html
```
**Real-time Monitoring:**
```bash
# Watch mode with live updates
npx Codex-flow@alpha truth --watch
# Export metrics automatically
npx Codex-flow@alpha truth --export .Codex-flow/metrics/truth-$(date +%Y%m%d).json
```
#### Truth Score Dashboard
Example dashboard output:
```
📊 Truth Metrics Dashboard
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Overall Truth Score: 0.947 ✅
Trend: ↗️ +2.3% (7d)
Top Performers:
verification-agent 0.982 ⭐
code-analyzer 0.971 ⭐
test-generator 0.958 ✅
Needs Attention:
refactor-agent 0.821 ⚠️
docs-generator 0.794 ⚠️
Recent Tasks:
task-456 0.991 ✅ "Implement auth"
task-455 0.967 ✅ "Add tests"
task-454 0.743 ❌ "Refactor API"
```
#### Metrics Explained
**Truth Scores (0.0-1.0):**
- `1.0-0.95`: Excellent ⭐ (production-ready)
- `0.94-0.85`: Good ✅ (acceptable quality)
- `0.84-0.75`: Warning ⚠️ (needs attention)
- `<0.75`: Critical ❌ (requires immediate action)
**Trend Indicators:**
- ↗️ Improving (positive trend)
- → Stable (consistent performance)
- ↘️ Declining (quality regression detected)
**Statistics:**
- **Mean Score**: Average truth score across all measurements
- **Median Score**: Middle value (less affected by outliers)
- **Standard Deviation**: Consistency of scores (lower = more consistent)
- **Confidence Interval**: Statistical reliability of measurements
### Verification Checks
#### Run Verification
Execute comprehensive verification checks on code, tasks, or agent outputs.
**File Verification:**
```bash
# Verify single file
npx Codex-flow@alpha verify check --file src/app.js
# Verify directory recursively
npx Codex-flow@alpha verify check --directory src/
# Verify with auto-fix enabled
npx Codex-flow@alpha verify check --file src/utils.js --auto-fix
# Verify current working directory
npx Codex-flow@alpha verify check
```
**Task Verification:**
```bash
# Verify specific task output
npx Codex-flow@alpha verify check --task task-123
# Verify with custom threshold
npx Codex-flow@alpha verify check --task task-456 --threshold 0.99
# Verbose output for debugging
npx Codex-flow@alpha verify check --task task-789 --verbose
```
**Batch Verification:**
```bash
# Verify multiple files in parallel
npx Codex-flow@alpha verify batch --files "*.js" --parallel
# Verify with pattern matching
npx Codex-flow@alpha verify batch --pattern "src/**/*.ts"
# Integration test suite
npx Codex-flow@alpha verify integration --test-suite full
```
#### Verification Criteria
The verification system evaluates:
1. **Code Correctness**
- Syntax validation
- Type checking (TypeScript)
- Logic flow analysis
- Error handling completeness
2. **Best Practices**
- Code style adherence
- SOLID principles
- Design patterns usage
- Modularity and reusability
3. **Security**
- Vulnerability scanning
- Secret detection
- Input validation
- Authentication/authorization checks
4. **Performance**
- Algorithmic complexity
- Memory usage patterns
- Database query optimization
- Bundle size impact
5. **Documentation**
- JSDoc/TypeDoc completeness
- README accuracy
- API documentation
- Code comments quality
#### JSON Output for CI/CD
```bash
# Get structured JSON output
npx Codex-flow@alpha verify check --json > verification.json
# Example JSON structure:
{
"overallScore": 0.947,
"passed": true,
"threshold": 0.95,
"checks": [
{
"name": "code-correctness",
"score": 0.98,
"passed": true
},
{
"name": "security",
"score": 0.91,
"passed": false,
"issues": [...]
}
]
}
```
### Automatic Rollback
#### Rollback Failed Changes
Automatically revert changes that fail verification checks.
**Basic Rollback:**
```bash
# Rollback to last known good state
npx Codex-flow@alpha verify rollback --last-good
# Rollback to specific commit
npx Codex-flow@alpha verify rollback --to-commit abc123
# Interactive rollback with preview
npx Codex-flow@alpha verify rollback --interactive
```
**Smart Rollback:**
```bash
# Rollback only failed files (preserve good changes)
npx Codex-flow@alpha verify rollback --selective
# Rollback with automatic backup
npx Codex-flow@alpha verify rollback --backup-first
# Dry-run mode (preview without executing)
npx Codex-flow@alpha verify rollback --dry-run
```
**Rollback Performance:**
- Git-based rollback: <1 second
- Selective file rollback: <500ms
- Backup creation: Automatic before rollback
### Verification Reports
#### Generate Reports
Create detailed verification reports with metrics and visualizations.
**Report Formats:**
```bash
# JSON report
npx Codex-flow@alpha verify report --format json
# HTML report with charts
npx Codex-flow@alpha verify report --export metrics.html --format html
# CSV for data analysis
npx Codex-flow@alpha verify report --format csv --export metrics.csv
# Markdown summary
npx Codex-flow@alpha verify report --format markdown
```
**Time-based Reports:**
```bash
# Last 24 hours
npx Codex-flow@alpha verify report --period 24h
# Last 7 days
npx Codex-flow@alpha verify report --period 7d
# Last 30 days with trends
npx Codex-flow@alpha verify report --period 30d --include-trends
# Custom date range
npx Codex-flow@alpha verify report --from 2025-01-01 --to 2025-01-31
```
**Report Content:**
- Overall truth scores
- Per-agent performance metrics
- Task completion quality
- Verification pass/fail rates
- Rollback frequency
- Quality improvement trends
- Statistical confidence intervals
### Interactive Dashboard
#### Launch Dashboard
Run interactive web-based verification dashboard with real-time updates.
```bash
# Launch dashboard on default port (3000)
npx Codex-flow@alpha verify dashboard
# Custom port
npx Codex-flow@alpha verify dashboard --port 8080
# Export dashboard data
npx Codex-flow@alpha verify dashboard --export
# Dashboard with auto-refresh
npx Codex-flow@alpha verify dashboard --refresh 5s
```
**Dashboard Features:**
- Real-time truth score updates (WebSocket)
- Interactive charts and graphs
- Agent performance comparison
- Task history timeline
- Rollback history viewer
- Export to PDF/HTML
- Filter by time period/agent/score
### Configuration
#### Default Configuration
Set verification preferences in `.Codex-flow/config.json`:
```json
{
"verification": {
"threshold": 0.95,
"autoRollback": true,
"gitIntegration": true,
"hooks": {
"preCommit": true,
"preTask": true,
"postEdit": true
},
"checks": {
"codeCorrectness": true,
"security": true,
"performance": true,
"documentation": true,
"bestPractices": true
}
},
"truth": {
"defaultFormat": "table",
"defaultPeriod": "24h",
"warningThreshold": 0.85,
"criticalThreshold": 0.75,
"autoExport": {
"enabled": true,
"path": ".Codex-flow/metrics/truth-daily.json"
}
}
}
```
#### Threshold Configuration
**Adjust verification strictness:**
```bash
# Strict mode (99% accuracy required)
npx Codex-flow@alpha verify check --threshold 0.99
# Lenient mode (90% acceptable)
npx Codex-flow@alpha verify check --threshold 0.90
# Set default threshold
npx Codex-flow@alpha config set verification.threshold 0.98
```
**Per-environment thresholds:**
```json
{
"verification": {
"thresholds": {
"production": 0.99,
"staging": 0.95,
"development": 0.90
}
}
}
```
### Integration Examples
#### CI/CD Integration
**GitHub Actions:**
```yaml
name: Quality Verification
on: [push, pull_request]
jobs:
verify:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Dependencies
run: npm install
- name: Run Verification
run: |
npx Codex-flow@alpha verify check --json > verification.json
- name: Check Truth Score
run: |
score=$(jq '.overallScore' verification.json)
if (( $(echo "$score < 0.95" | bc -l) )); then
echo "Truth score too low: $score"
exit 1
fi
- name: Upload Report
uses: actions/upload-artifact@v3
with:
name: verification-report
path: verification.json
```
**GitLab CI:**
```yaml
verify:
stage: test
script:
- npx Codex-flow@alpha verify check --threshold 0.95 --json > verification.json
- |
score=$(jq '.overallScore' verification.json)
if [ $(echo "$score < 0.95" | bc) -eq 1 ]; then
echo "Verification failed with score: $score"
exit 1
fi
artifacts:
paths:
- verification.json
# NOTE: GitLab's `reports:junit` requires JUnit XML, not JSON. Convert
# verification.json to JUnit XML first if you want test-report integration;
# until then, the artifact upload above is sufficient.
```
#### Swarm Integration
Run verification automatically during swarm operations:
```bash
# Swarm with verification enabled
npx Codex-flow@alpha swarm --verify --threshold 0.98
# Hive Mind with auto-rollback
npx Codex-flow@alpha hive-mind --verify --rollback-on-fail
# Training pipeline with verification
npx Codex-flow@alpha train --verify --threshold 0.99
```
#### Pair Programming Integration
Enable real-time verification during collaborative development:
```bash
# Pair with verification
npx Codex-flow@alpha pair --verify --real-time
# Pair with custom threshold
npx Codex-flow@alpha pair --verify --threshold 0.97 --auto-fix
```
### Advanced Workflows
#### Continuous Verification
Monitor codebase continuously during development:
```bash
# Watch directory for changes
npx Codex-flow@alpha verify watch --directory src/
# Watch with auto-fix
npx Codex-flow@alpha verify watch --directory src/ --auto-fix
# Watch with notifications
npx Codex-flow@alpha verify watch --notify --threshold 0.95
```
#### Monitoring Integration
Send metrics to external monitoring systems:
```bash
# Export to Prometheus
npx Codex-flow@alpha truth --format json | \
curl -X POST https://pushgateway.example.com/metrics/job/Codex-flow \
-d @-
# Send to DataDog
npx Codex-flow@alpha verify report --format json | \
curl -X POST "https://api.datadoghq.com/api/v1/series?api_key=${DD_API_KEY}" \
-H "Content-Type: application/json" \
-d @-
# Custom webhook
npx Codex-flow@alpha truth --format json | \
curl -X POST https://metrics.example.com/api/truth \
-H "Content-Type: application/json" \
-d @-
```
#### Pre-commit Hooks
Automatically verify before commits:
```bash
# Install pre-commit hook
npx Codex-flow@alpha verify install-hook --pre-commit
# .git/hooks/pre-commit example:
#!/bin/bash
npx Codex-flow@alpha verify check --threshold 0.95 --json > /tmp/verify.json
score=$(jq '.overallScore' /tmp/verify.json)
if (( $(echo "$score < 0.95" | bc -l) )); then
echo "❌ Verification failed with score: $score"
echo "Run 'npx Codex-flow@alpha verify check --verbose' for details"
exit 1
fi
echo "✅ Verification passed with score: $score"
```
### Performance Metrics
**Verification Speed:**
- Single file check: <100ms
- Directory scan: <500ms (per 100 files)
- Full codebase analysis: <5s (typical project)
- Truth score calculation: <50ms
**Rollback Speed:**
- Git-based rollback: <1s
- Selective file rollback: <500ms
- Backup creation: <2s
**Dashboard Performance:**
- Initial load: <1s
- Real-time updates: <100ms latency (WebSocket)
- Chart rendering: 60 FPS
### Troubleshooting
#### Common Issues
**Low Truth Scores:**
```bash
# Get detailed breakdown
npx Codex-flow@alpha truth --verbose --threshold 0.0
# Check specific criteria
npx Codex-flow@alpha verify check --verbose
# View agent-specific issues
npx Codex-flow@alpha truth --agent <agent-name> --format json
```
**Rollback Failures:**
```bash
# Check git status
git status
# View rollback history
npx Codex-flow@alpha verify rollback --history
# Manual rollback
git reset --hard HEAD~1
```
**Verification Timeouts:**
```bash
# Increase timeout
npx Codex-flow@alpha verify check --timeout 60s
# Verify in batches
npx Codex-flow@alpha verify batch --batch-size 10
```
### Exit Codes
Verification commands return standard exit codes:
- `0`: Verification passed (score ≥ threshold)
- `1`: Verification failed (score < threshold)
- `2`: Error during verification (invalid input, system error)
### Related Commands
- `npx Codex-flow@alpha pair` - Collaborative development with verification
- `npx Codex-flow@alpha train` - Training with verification feedback
- `npx Codex-flow@alpha swarm` - Multi-agent coordination with quality checks
- `npx Codex-flow@alpha report` - Generate comprehensive project reports
### Best Practices
1. **Set Appropriate Thresholds**: Use 0.99 for critical code, 0.95 for standard, 0.90 for experimental
2. **Enable Auto-rollback**: Prevent bad code from persisting
3. **Monitor Trends**: Track improvement over time, not just current scores
4. **Integrate with CI/CD**: Make verification part of your pipeline
5. **Use Watch Mode**: Get immediate feedback during development
6. **Export Metrics**: Track quality metrics in your monitoring system
7. **Review Rollbacks**: Understand why changes were rejected
8. **Train Agents**: Use verification feedback to improve agent performance
### Additional Resources
- Truth Scoring Algorithm: See `/docs/truth-scoring.md`
- Verification Criteria: See `/docs/verification-criteria.md`
- Integration Examples: See `/examples/verification/`
- API Reference: See `/docs/api/verification.md`

215
apis/admin/group.api Normal file
View File

@ -0,0 +1,215 @@
syntax = "v1"
info (
title: "Group API"
desc: "API for user group and node group management"
author: "Tension"
email: "tension@ppanel.com"
version: "0.0.1"
)
import (
"../types.api"
"./server.api"
)
type (
// ===== Node group management =====
// GetNodeGroupListRequest: paged listing; group_id optionally filters.
GetNodeGroupListRequest {
Page int `form:"page"`
Size int `form:"size"`
GroupId string `form:"group_id,omitempty"`
}
// GetNodeGroupListResponse
GetNodeGroupListResponse {
Total int64 `json:"total"`
List []NodeGroup `json:"list"`
}
// CreateNodeGroupRequest: pointer fields are optional so "unset" stays
// distinct from the zero value.
CreateNodeGroupRequest {
Name string `json:"name" validate:"required"`
Description string `json:"description"`
Sort int `json:"sort"`
ForCalculation *bool `json:"for_calculation"`
IsExpiredGroup *bool `json:"is_expired_group"`
ExpiredDaysLimit *int `json:"expired_days_limit"`
MaxTrafficGBExpired *int64 `json:"max_traffic_gb_expired,omitempty"`
SpeedLimit *int `json:"speed_limit"`
MinTrafficGB *int64 `json:"min_traffic_gb,omitempty"`
MaxTrafficGB *int64 `json:"max_traffic_gb,omitempty"`
}
// UpdateNodeGroupRequest: same shape as create, keyed by the required Id.
UpdateNodeGroupRequest {
Id int64 `json:"id" validate:"required"`
Name string `json:"name"`
Description string `json:"description"`
Sort int `json:"sort"`
ForCalculation *bool `json:"for_calculation"`
IsExpiredGroup *bool `json:"is_expired_group"`
ExpiredDaysLimit *int `json:"expired_days_limit"`
MaxTrafficGBExpired *int64 `json:"max_traffic_gb_expired,omitempty"`
SpeedLimit *int `json:"speed_limit"`
MinTrafficGB *int64 `json:"min_traffic_gb,omitempty"`
MaxTrafficGB *int64 `json:"max_traffic_gb,omitempty"`
}
// DeleteNodeGroupRequest
DeleteNodeGroupRequest {
Id int64 `json:"id" validate:"required"`
}
// ===== Group configuration management =====
// GetGroupConfigRequest: Keys optionally restricts which config keys to return.
GetGroupConfigRequest {
Keys []string `form:"keys,omitempty"`
}
// GetGroupConfigResponse
GetGroupConfigResponse {
Enabled bool `json:"enabled"`
Mode string `json:"mode"`
Config map[string]interface{} `json:"config"`
State RecalculationState `json:"state"`
}
// UpdateGroupConfigRequest
UpdateGroupConfigRequest {
Enabled bool `json:"enabled"`
Mode string `json:"mode"`
Config map[string]interface{} `json:"config"`
}
// RecalculationState reports the progress of a running group recalculation.
RecalculationState {
State string `json:"state"`
Progress int `json:"progress"`
Total int `json:"total"`
}
// ===== Group operations =====
// RecalculateGroupRequest
RecalculateGroupRequest {
Mode string `json:"mode" validate:"required"`
TriggerType string `json:"trigger_type"` // "manual" or "scheduled"
}
// GetGroupHistoryRequest: paged history query with optional filters.
GetGroupHistoryRequest {
Page int `form:"page"`
Size int `form:"size"`
GroupMode string `form:"group_mode,omitempty"`
TriggerType string `form:"trigger_type,omitempty"`
}
// GetGroupHistoryResponse
GetGroupHistoryResponse {
Total int64 `json:"total"`
List []GroupHistory `json:"list"`
}
// GetGroupHistoryDetailRequest
GetGroupHistoryDetailRequest {
Id int64 `form:"id" validate:"required"`
}
// GetGroupHistoryDetailResponse embeds the full detail record.
GetGroupHistoryDetailResponse {
GroupHistoryDetail
}
// PreviewUserNodesRequest
PreviewUserNodesRequest {
UserId int64 `form:"user_id" validate:"required"`
}
// PreviewUserNodesResponse: node groups (and their nodes) visible to the user.
PreviewUserNodesResponse {
UserId int64 `json:"user_id"`
NodeGroups []NodeGroupItem `json:"node_groups"`
}
// NodeGroupItem
NodeGroupItem {
Id int64 `json:"id"`
Name string `json:"name"`
Nodes []Node `json:"nodes"`
}
// ExportGroupResultRequest: HistoryId is optional.
ExportGroupResultRequest {
HistoryId *int64 `form:"history_id,omitempty"`
}
// ===== Reset Groups =====
// ResetGroupsRequest: Confirm is required as a guard against accidental resets.
ResetGroupsRequest {
Confirm bool `json:"confirm" validate:"required"`
}
// ===== Subscribe-to-node-group mapping =====
// SubscribeGroupMappingItem: one subscribe-plan-to-node-group pairing.
SubscribeGroupMappingItem {
SubscribeName string `json:"subscribe_name"`
NodeGroupName string `json:"node_group_name"`
}
// GetSubscribeGroupMappingRequest
GetSubscribeGroupMappingRequest {}
// GetSubscribeGroupMappingResponse
GetSubscribeGroupMappingResponse {
List []SubscribeGroupMappingItem `json:"list"`
}
)
// All routes below are served under v1/admin/group, require a JWT, and pass
// through AuthMiddleware.
@server (
prefix: v1/admin/group
group: admin/group
jwt: JwtAuth
middleware: AuthMiddleware
)
service ppanel {
// ===== Node group management =====
@doc "Get node group list"
@handler GetNodeGroupList
get /node/list (GetNodeGroupListRequest) returns (GetNodeGroupListResponse)
@doc "Create node group"
@handler CreateNodeGroup
post /node (CreateNodeGroupRequest)
@doc "Update node group"
@handler UpdateNodeGroup
put /node (UpdateNodeGroupRequest)
@doc "Delete node group"
@handler DeleteNodeGroup
delete /node (DeleteNodeGroupRequest)
// ===== Group configuration management =====
@doc "Get group config"
@handler GetGroupConfig
get /config (GetGroupConfigRequest) returns (GetGroupConfigResponse)
@doc "Update group config"
@handler UpdateGroupConfig
put /config (UpdateGroupConfigRequest)
// ===== Group operations =====
@doc "Recalculate group"
@handler RecalculateGroup
post /recalculate (RecalculateGroupRequest)
@doc "Get recalculation status"
@handler GetRecalculationStatus
get /recalculation/status returns (RecalculationState)
@doc "Get group history"
@handler GetGroupHistory
get /history (GetGroupHistoryRequest) returns (GetGroupHistoryResponse)
@doc "Export group result"
@handler ExportGroupResult
get /export (ExportGroupResultRequest)
// Routes with query parameters
@doc "Get group history detail"
@handler GetGroupHistoryDetail
get /history/detail (GetGroupHistoryDetailRequest) returns (GetGroupHistoryDetailResponse)
@doc "Preview user nodes"
@handler PreviewUserNodes
get /preview (PreviewUserNodesRequest) returns (PreviewUserNodesResponse)
@doc "Reset all groups"
@handler ResetGroups
post /reset (ResetGroupsRequest)
@doc "Get subscribe group mapping"
@handler GetSubscribeGroupMapping
get /subscribe/mapping (GetSubscribeGroupMappingRequest) returns (GetSubscribeGroupMappingResponse)
}

View File

@ -163,5 +163,9 @@ service ppanel {
@doc "Query quota task list" @doc "Query quota task list"
@handler QueryQuotaTaskList @handler QueryQuotaTaskList
get /quota/list (QueryQuotaTaskListRequest) returns (QueryQuotaTaskListResponse) get /quota/list (QueryQuotaTaskListRequest) returns (QueryQuotaTaskListResponse)
@doc "Query quota task status"
@handler QueryQuotaTaskStatus
post /quota/status (QueryQuotaTaskStatusRequest) returns (QueryQuotaTaskStatusResponse)
} }

View File

@ -80,36 +80,40 @@ type (
Protocols []Protocol `json:"protocols"` Protocols []Protocol `json:"protocols"`
} }
Node { Node {
Id int64 `json:"id"` Id int64 `json:"id"`
Name string `json:"name"` Name string `json:"name"`
Tags []string `json:"tags"` Tags []string `json:"tags"`
Port uint16 `json:"port"` Port uint16 `json:"port"`
Address string `json:"address"` Address string `json:"address"`
ServerId int64 `json:"server_id"` ServerId int64 `json:"server_id"`
Protocol string `json:"protocol"` Protocol string `json:"protocol"`
Enabled *bool `json:"enabled"` Enabled *bool `json:"enabled"`
Sort int `json:"sort,omitempty"` Sort int `json:"sort,omitempty"`
CreatedAt int64 `json:"created_at"` NodeGroupId int64 `json:"node_group_id,omitempty"`
UpdatedAt int64 `json:"updated_at"` NodeGroupIds []int64 `json:"node_group_ids,omitempty"`
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at"`
} }
CreateNodeRequest { CreateNodeRequest {
Name string `json:"name"` Name string `json:"name"`
Tags []string `json:"tags,omitempty"` Tags []string `json:"tags,omitempty"`
Port uint16 `json:"port"` Port uint16 `json:"port"`
Address string `json:"address"` Address string `json:"address"`
ServerId int64 `json:"server_id"` ServerId int64 `json:"server_id"`
Protocol string `json:"protocol"` Protocol string `json:"protocol"`
Enabled *bool `json:"enabled"` Enabled *bool `json:"enabled"`
NodeGroupIds []int64 `json:"node_group_ids,omitempty"`
} }
UpdateNodeRequest { UpdateNodeRequest {
Id int64 `json:"id"` Id int64 `json:"id"`
Name string `json:"name"` Name string `json:"name"`
Tags []string `json:"tags,omitempty"` Tags []string `json:"tags,omitempty"`
Port uint16 `json:"port"` Port uint16 `json:"port"`
Address string `json:"address"` Address string `json:"address"`
ServerId int64 `json:"server_id"` ServerId int64 `json:"server_id"`
Protocol string `json:"protocol"` Protocol string `json:"protocol"`
Enabled *bool `json:"enabled"` Enabled *bool `json:"enabled"`
NodeGroupIds []int64 `json:"node_group_ids,omitempty"`
} }
ToggleNodeStatusRequest { ToggleNodeStatusRequest {
Id int64 `json:"id"` Id int64 `json:"id"`
@ -119,9 +123,10 @@ type (
Id int64 `json:"id"` Id int64 `json:"id"`
} }
FilterNodeListRequest { FilterNodeListRequest {
Page int `form:"page"` Page int `form:"page"`
Size int `form:"size"` Size int `form:"size"`
Search string `form:"search,omitempty"` Search string `form:"search,omitempty"`
NodeGroupId *int64 `form:"node_group_id,omitempty"`
} }
FilterNodeListResponse { FilterNodeListResponse {
Total int64 `json:"total"` Total int64 `json:"total"`

View File

@ -48,6 +48,9 @@ type (
Quota int64 `json:"quota"` Quota int64 `json:"quota"`
Nodes []int64 `json:"nodes"` Nodes []int64 `json:"nodes"`
NodeTags []string `json:"node_tags"` NodeTags []string `json:"node_tags"`
NodeGroupIds []int64 `json:"node_group_ids,omitempty"`
NodeGroupId int64 `json:"node_group_id"`
TrafficLimit []TrafficLimit `json:"traffic_limit"`
Show *bool `json:"show"` Show *bool `json:"show"`
Sell *bool `json:"sell"` Sell *bool `json:"sell"`
DeductionRatio int64 `json:"deduction_ratio"` DeductionRatio int64 `json:"deduction_ratio"`
@ -55,6 +58,7 @@ type (
ResetCycle int64 `json:"reset_cycle"` ResetCycle int64 `json:"reset_cycle"`
RenewalReset *bool `json:"renewal_reset"` RenewalReset *bool `json:"renewal_reset"`
ShowOriginalPrice bool `json:"show_original_price"` ShowOriginalPrice bool `json:"show_original_price"`
AutoCreateGroup bool `json:"auto_create_group"`
} }
UpdateSubscribeRequest { UpdateSubscribeRequest {
Id int64 `json:"id" validate:"required"` Id int64 `json:"id" validate:"required"`
@ -72,6 +76,9 @@ type (
Quota int64 `json:"quota"` Quota int64 `json:"quota"`
Nodes []int64 `json:"nodes"` Nodes []int64 `json:"nodes"`
NodeTags []string `json:"node_tags"` NodeTags []string `json:"node_tags"`
NodeGroupIds []int64 `json:"node_group_ids,omitempty"`
NodeGroupId int64 `json:"node_group_id"`
TrafficLimit []TrafficLimit `json:"traffic_limit"`
Show *bool `json:"show"` Show *bool `json:"show"`
Sell *bool `json:"sell"` Sell *bool `json:"sell"`
Sort int64 `json:"sort"` Sort int64 `json:"sort"`
@ -85,10 +92,11 @@ type (
Sort []SortItem `json:"sort"` Sort []SortItem `json:"sort"`
} }
GetSubscribeListRequest { GetSubscribeListRequest {
Page int64 `form:"page" validate:"required"` Page int64 `form:"page" validate:"required"`
Size int64 `form:"size" validate:"required"` Size int64 `form:"size" validate:"required"`
Language string `form:"language,omitempty"` Language string `form:"language,omitempty"`
Search string `form:"search,omitempty"` Search string `form:"search,omitempty"`
NodeGroupId int64 `form:"node_group_id,omitempty"`
} }
SubscribeItem { SubscribeItem {
Subscribe Subscribe

View File

@ -77,22 +77,27 @@ type (
IsAdmin bool `json:"is_admin"` IsAdmin bool `json:"is_admin"`
} }
UserSubscribeDetail { UserSubscribeDetail {
Id int64 `json:"id"` Id int64 `json:"id"`
UserId int64 `json:"user_id"` UserId int64 `json:"user_id"`
User User `json:"user"` User User `json:"user"`
OrderId int64 `json:"order_id"` OrderId int64 `json:"order_id"`
SubscribeId int64 `json:"subscribe_id"` SubscribeId int64 `json:"subscribe_id"`
Subscribe Subscribe `json:"subscribe"` Subscribe Subscribe `json:"subscribe"`
StartTime int64 `json:"start_time"` NodeGroupId int64 `json:"node_group_id"`
ExpireTime int64 `json:"expire_time"` GroupLocked bool `json:"group_locked"`
ResetTime int64 `json:"reset_time"` StartTime int64 `json:"start_time"`
Traffic int64 `json:"traffic"` ExpireTime int64 `json:"expire_time"`
Download int64 `json:"download"` ResetTime int64 `json:"reset_time"`
Upload int64 `json:"upload"` Traffic int64 `json:"traffic"`
Token string `json:"token"` Download int64 `json:"download"`
Status uint8 `json:"status"` Upload int64 `json:"upload"`
CreatedAt int64 `json:"created_at"` Token string `json:"token"`
UpdatedAt int64 `json:"updated_at"` Status uint8 `json:"status"`
EffectiveSpeed int64 `json:"effective_speed"`
IsThrottled bool `json:"is_throttled"`
ThrottleRule string `json:"throttle_rule,omitempty"`
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at"`
} }
BatchDeleteUserRequest { BatchDeleteUserRequest {
Ids []int64 `json:"ids" validate:"required"` Ids []int64 `json:"ids" validate:"required"`

View File

@ -11,13 +11,16 @@ info (
type ( type (
// User login request // User login request
UserLoginRequest { UserLoginRequest {
Identifier string `json:"identifier"` Identifier string `json:"identifier"`
Email string `json:"email" validate:"required"` Email string `json:"email" validate:"required"`
Password string `json:"password" validate:"required"` Password string `json:"password" validate:"required"`
IP string `header:"X-Original-Forwarded-For"` IP string `header:"X-Original-Forwarded-For"`
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type"` LoginType string `header:"Login-Type"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
CaptchaId string `json:"captcha_id,optional"`
CaptchaCode string `json:"captcha_code,optional"`
SliderToken string `json:"slider_token,optional"`
} }
// Check user is exist request // Check user is exist request
CheckUserRequest { CheckUserRequest {
@ -29,37 +32,43 @@ type (
} }
// User login response // User login response
UserRegisterRequest { UserRegisterRequest {
Identifier string `json:"identifier"` Identifier string `json:"identifier"`
Email string `json:"email" validate:"required"` Email string `json:"email" validate:"required"`
Password string `json:"password" validate:"required"` Password string `json:"password" validate:"required"`
Invite string `json:"invite,optional"` Invite string `json:"invite,optional"`
Code string `json:"code,optional"` Code string `json:"code,optional"`
IP string `header:"X-Original-Forwarded-For"` IP string `header:"X-Original-Forwarded-For"`
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type"` LoginType string `header:"Login-Type"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
CaptchaId string `json:"captcha_id,optional"`
CaptchaCode string `json:"captcha_code,optional"`
SliderToken string `json:"slider_token,optional"`
} }
// User login response // User reset password request
ResetPasswordRequest { ResetPasswordRequest {
Identifier string `json:"identifier"` Identifier string `json:"identifier"`
Email string `json:"email" validate:"required"` Email string `json:"email" validate:"required"`
Password string `json:"password" validate:"required"` Password string `json:"password" validate:"required"`
Code string `json:"code,optional"` Code string `json:"code,optional"`
IP string `header:"X-Original-Forwarded-For"` IP string `header:"X-Original-Forwarded-For"`
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type"` LoginType string `header:"Login-Type"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
CaptchaId string `json:"captcha_id,optional"`
CaptchaCode string `json:"captcha_code,optional"`
SliderToken string `json:"slider_token,optional"`
} }
// Email login request // Email login request
EmailLoginRequest { EmailLoginRequest {
Identifier string `json:"identifier"` Identifier string `json:"identifier" form:"identifier"`
Email string `json:"email" validate:"required"` Email string `json:"email" form:"email" validate:"required,email"`
Code string `json:"code" validate:"required"` Code string `json:"code" form:"code" validate:"required"`
Invite string `json:"invite,optional"` Invite string `json:"invite,optional" form:"invite"`
IP string `header:"X-Original-Forwarded-For"` IP string `header:"X-Original-Forwarded-For"`
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type"` LoginType string `header:"Login-Type"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional" form:"cf_token"`
} }
LoginResponse { LoginResponse {
Token string `json:"token"` Token string `json:"token"`
@ -86,6 +95,9 @@ type (
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type"` LoginType string `header:"Login-Type"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
CaptchaId string `json:"captcha_id,optional"`
CaptchaCode string `json:"captcha_code,optional"`
SliderToken string `json:"slider_token,optional"`
} }
// Check user is exist request // Check user is exist request
TelephoneCheckUserRequest { TelephoneCheckUserRequest {
@ -108,6 +120,9 @@ type (
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type,optional"` LoginType string `header:"Login-Type,optional"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
CaptchaId string `json:"captcha_id,optional"`
CaptchaCode string `json:"captcha_code,optional"`
SliderToken string `json:"slider_token,optional"`
} }
// User login response // User login response
TelephoneResetPasswordRequest { TelephoneResetPasswordRequest {
@ -120,6 +135,9 @@ type (
UserAgent string `header:"User-Agent"` UserAgent string `header:"User-Agent"`
LoginType string `header:"Login-Type,optional"` LoginType string `header:"Login-Type,optional"`
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
CaptchaId string `json:"captcha_id,optional"`
CaptchaCode string `json:"captcha_code,optional"`
SliderToken string `json:"slider_token,optional"`
} }
AppleLoginCallbackRequest { AppleLoginCallbackRequest {
Code string `form:"code"` Code string `form:"code"`
@ -137,6 +155,21 @@ type (
CfToken string `json:"cf_token,optional"` CfToken string `json:"cf_token,optional"`
ShortCode string `json:"short_code,optional"` ShortCode string `json:"short_code,optional"`
} }
GenerateCaptchaResponse {
Id string `json:"id"`
Image string `json:"image"`
Type string `json:"type"`
BlockImage string `json:"block_image,omitempty"`
}
SliderVerifyCaptchaRequest {
Id string `json:"id" validate:"required"`
X int `json:"x" validate:"required"`
Y int `json:"y" validate:"required"`
Trail string `json:"trail"`
}
SliderVerifyCaptchaResponse {
Token string `json:"token"`
}
) )
@server ( @server (
@ -174,16 +207,47 @@ service ppanel {
get /check/telephone (TelephoneCheckUserRequest) returns (TelephoneCheckUserResponse) get /check/telephone (TelephoneCheckUserRequest) returns (TelephoneCheckUserResponse)
@doc "User Telephone register" @doc "User Telephone register"
@handler TelephoneUserRegister @handler TelephoneRegister
post /register/telephone (TelephoneRegisterRequest) returns (LoginResponse) post /register/telephone (TelephoneRegisterRequest) returns (LoginResponse)
@doc "Reset password" @doc "Reset password by telephone"
@handler TelephoneResetPassword @handler TelephoneResetPassword
post /reset/telephone (TelephoneResetPasswordRequest) returns (LoginResponse) post /reset/telephone (TelephoneResetPasswordRequest) returns (LoginResponse)
@doc "Device Login" @doc "Device Login"
@handler DeviceLogin @handler DeviceLogin
post /login/device (DeviceLoginRequest) returns (LoginResponse) post /login/device (DeviceLoginRequest) returns (LoginResponse)
@doc "Generate captcha"
@handler GenerateCaptcha
post /captcha/generate returns (GenerateCaptchaResponse)
@doc "Verify slider captcha"
@handler SliderVerifyCaptcha
post /captcha/slider/verify (SliderVerifyCaptchaRequest) returns (SliderVerifyCaptchaResponse)
}
@server (
prefix: v1/auth/admin
group: auth/admin
middleware: DeviceMiddleware
)
service ppanel {
@doc "Admin login"
@handler AdminLogin
post /login (UserLoginRequest) returns (LoginResponse)
@doc "Admin reset password"
@handler AdminResetPassword
post /reset (ResetPasswordRequest) returns (LoginResponse)
@doc "Generate captcha"
@handler AdminGenerateCaptcha
post /captcha/generate returns (GenerateCaptchaResponse)
@doc "Verify slider captcha"
@handler AdminSliderVerifyCaptcha
post /captcha/slider/verify (SliderVerifyCaptchaRequest) returns (SliderVerifyCaptchaResponse)
} }
@server ( @server (
@ -203,4 +267,3 @@ service ppanel {
@handler AppleLoginCallback @handler AppleLoginCallback
post /callback/apple (AppleLoginCallbackRequest) post /callback/apple (AppleLoginCallbackRequest)
} }

View File

@ -12,10 +12,12 @@ import "./types.api"
type ( type (
VeifyConfig { VeifyConfig {
TurnstileSiteKey string `json:"turnstile_site_key"` CaptchaType string `json:"captcha_type"`
EnableLoginVerify bool `json:"enable_login_verify"` TurnstileSiteKey string `json:"turnstile_site_key"`
EnableRegisterVerify bool `json:"enable_register_verify"` EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"`
EnableResetPasswordVerify bool `json:"enable_reset_password_verify"` EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"`
EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"`
EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"`
} }
GetGlobalConfigResponse { GetGlobalConfigResponse {
Site SiteConfig `json:"site"` Site SiteConfig `json:"site"`
@ -94,6 +96,48 @@ type (
Message string `json:"message,omitempty"` Message string `json:"message,omitempty"`
Timestamp int64 `json:"timestamp,omitempty"` Timestamp int64 `json:"timestamp,omitempty"`
} }
GetDownloadLinkRequest {
InviteCode string `form:"invite_code,optional"`
Platform string `form:"platform" validate:"required,oneof=windows mac ios android"`
}
GetDownloadLinkResponse {
Url string `json:"url"`
}
ContactRequest {
Name string `json:"name" validate:"required,max=100"`
Email string `json:"email" validate:"required,email"`
OtherContact string `json:"other_contact" validate:"max=200"`
Notes string `json:"notes" validate:"max=2000"`
}
ReportLogMessageRequest {
Platform string `json:"platform" validate:"required,max=32"`
AppVersion string `json:"app_version" validate:"required,max=64"`
OsName string `json:"os_name" validate:"max=64"`
OsVersion string `json:"os_version" validate:"max=64"`
DeviceId string `json:"device_id" validate:"required,max=255"`
UserId int64 `json:"user_id"`
SessionId string `json:"session_id" validate:"max=255"`
Level uint8 `json:"level"`
ErrorCode string `json:"error_code" validate:"max=128"`
Message string `json:"message" validate:"required,max=65535"`
Stack string `json:"stack" validate:"max=1048576"`
Context interface{} `json:"context"`
OccurredAt int64 `json:"occurred_at"`
}
ReportLogMessageResponse {
Id int64 `json:"id"`
}
LegacyCheckVerificationCodeRequest {
Method string `json:"method" form:"method" validate:"omitempty,oneof=email mobile"`
Account string `json:"account" form:"account"`
Email string `json:"email" form:"email"`
Code string `json:"code" form:"code" validate:"required"`
Type uint8 `json:"type" form:"type" validate:"required"`
}
LegacyCheckVerificationCodeResponse {
Status bool `json:"status"`
Exist bool `json:"exist"`
}
) )
@server ( @server (
@ -141,5 +185,25 @@ service ppanel {
@doc "Heartbeat" @doc "Heartbeat"
@handler Heartbeat @handler Heartbeat
get /heartbeat returns (HeartbeatResponse) get /heartbeat returns (HeartbeatResponse)
@doc "Get Download Link"
@handler GetDownloadLink
get /client/download (GetDownloadLinkRequest) returns (GetDownloadLinkResponse)
@doc "Submit Contact"
@handler SubmitContact
post /contact (ContactRequest)
@doc "Report log message"
@handler ReportLogMessage
post /log/report (ReportLogMessageRequest) returns (ReportLogMessageResponse)
@doc "Check verification code (legacy v1)"
@handler CheckCodeLegacy
post /check_code (LegacyCheckVerificationCodeRequest) returns (LegacyCheckVerificationCodeResponse)
@doc "Check verification code (legacy v2, consume code)"
@handler CheckCodeLegacyV2
post /check_code/v2 (LegacyCheckVerificationCodeRequest) returns (LegacyCheckVerificationCodeResponse)
} }

View File

@ -71,5 +71,9 @@ service ppanel {
@doc "Get user subscribe node info" @doc "Get user subscribe node info"
@handler QueryUserSubscribeNodeList @handler QueryUserSubscribeNodeList
get /node/list returns (QueryUserSubscribeNodeListResponse) get /node/list returns (QueryUserSubscribeNodeListResponse)
@doc "Get subscribe group list"
@handler QuerySubscribeGroupList
get /group/list returns (QuerySubscribeGroupListResponse)
} }

View File

@ -220,6 +220,22 @@ type (
FriendlyCount int64 `json:"friendly_count"` FriendlyCount int64 `json:"friendly_count"`
HistoryCount int64 `json:"history_count"` HistoryCount int64 `json:"history_count"`
} }
GetUserTrafficStatsRequest {
UserSubscribeId string `form:"user_subscribe_id" validate:"required"`
Days int `form:"days" validate:"required,oneof=7 30"`
}
DailyTrafficStats {
Date string `json:"date"`
Upload int64 `json:"upload"`
Download int64 `json:"download"`
Total int64 `json:"total"`
}
GetUserTrafficStatsResponse {
List []DailyTrafficStats `json:"list"`
TotalUpload int64 `json:"total_upload"`
TotalDownload int64 `json:"total_download"`
TotalTraffic int64 `json:"total_traffic"`
}
) )
@server ( @server (
@ -374,11 +390,15 @@ service ppanel {
@doc "Get Subscribe Status" @doc "Get Subscribe Status"
@handler GetSubscribeStatus @handler GetSubscribeStatus
get /subscribe_status (GetSubscribeStatusRequest) returns (GetSubscribeStatusResponse) post /subscribe_status (GetSubscribeStatusRequest) returns (GetSubscribeStatusResponse)
@doc "Get User Invite Stats" @doc "Get User Invite Stats"
@handler GetUserInviteStats @handler GetUserInviteStats
get /invite_stats (GetUserInviteStatsRequest) returns (GetUserInviteStatsResponse) get /invite_stats (GetUserInviteStatsRequest) returns (GetUserInviteStatsResponse)
@doc "Get User Traffic Statistics"
@handler GetUserTrafficStats
get /traffic_stats (GetUserTrafficStatsRequest) returns (GetUserTrafficStatsResponse)
} }
@server ( @server (

View File

@ -170,11 +170,13 @@ type (
DeviceLimit int64 `json:"device_limit"` DeviceLimit int64 `json:"device_limit"`
} }
VerifyConfig { VerifyConfig {
TurnstileSiteKey string `json:"turnstile_site_key"` CaptchaType string `json:"captcha_type"` // local or turnstile
TurnstileSecret string `json:"turnstile_secret"` TurnstileSiteKey string `json:"turnstile_site_key"`
EnableLoginVerify bool `json:"enable_login_verify"` TurnstileSecret string `json:"turnstile_secret"`
EnableRegisterVerify bool `json:"enable_register_verify"` EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` // User login captcha
EnableResetPasswordVerify bool `json:"enable_reset_password_verify"` EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` // User register captcha
EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` // Admin login captcha
EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"` // User reset password captcha
} }
NodeConfig { NodeConfig {
NodeSecret string `json:"node_secret"` NodeSecret string `json:"node_secret"`
@ -226,6 +228,12 @@ type (
Quantity int64 `json:"quantity"` Quantity int64 `json:"quantity"`
Discount float64 `json:"discount"` Discount float64 `json:"discount"`
} }
TrafficLimit {
StatType string `json:"stat_type"`
StatValue int64 `json:"stat_value"`
TrafficUsage int64 `json:"traffic_usage"`
SpeedLimit int64 `json:"speed_limit"`
}
Subscribe { Subscribe {
Id int64 `json:"id"` Id int64 `json:"id"`
Name string `json:"name"` Name string `json:"name"`
@ -243,6 +251,9 @@ type (
Quota int64 `json:"quota"` Quota int64 `json:"quota"`
Nodes []int64 `json:"nodes"` Nodes []int64 `json:"nodes"`
NodeTags []string `json:"node_tags"` NodeTags []string `json:"node_tags"`
NodeGroupIds []int64 `json:"node_group_ids,omitempty"`
NodeGroupId int64 `json:"node_group_id"`
TrafficLimit []TrafficLimit `json:"traffic_limit"`
Show bool `json:"show"` Show bool `json:"show"`
Sell bool `json:"sell"` Sell bool `json:"sell"`
Sort int64 `json:"sort"` Sort int64 `json:"sort"`
@ -556,6 +567,7 @@ type (
FamilyId int64 `json:"family_id"` FamilyId int64 `json:"family_id"`
OwnerUserId int64 `json:"owner_user_id"` OwnerUserId int64 `json:"owner_user_id"`
OwnerIdentifier string `json:"owner_identifier"` OwnerIdentifier string `json:"owner_identifier"`
OwnerAuthType string `json:"owner_auth_type"`
Status string `json:"status"` Status string `json:"status"`
ActiveMemberCount int64 `json:"active_member_count"` ActiveMemberCount int64 `json:"active_member_count"`
MaxMembers int64 `json:"max_members"` MaxMembers int64 `json:"max_members"`
@ -565,7 +577,9 @@ type (
FamilyMemberItem { FamilyMemberItem {
UserId int64 `json:"user_id"` UserId int64 `json:"user_id"`
Identifier string `json:"identifier"` Identifier string `json:"identifier"`
AuthType string `json:"auth_type"`
DeviceNo string `json:"device_no"` DeviceNo string `json:"device_no"`
DeviceType string `json:"device_type"`
Role uint8 `json:"role"` Role uint8 `json:"role"`
RoleName string `json:"role_name"` RoleName string `json:"role_name"`
Status uint8 `json:"status"` Status uint8 `json:"status"`
@ -951,5 +965,42 @@ type (
ResetUserSubscribeTokenRequest { ResetUserSubscribeTokenRequest {
UserSubscribeId int64 `json:"user_subscribe_id"` UserSubscribeId int64 `json:"user_subscribe_id"`
} }
// ===== 分组功能类型定义 =====
// NodeGroup 节点组
NodeGroup {
Id int64 `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Sort int `json:"sort"`
ForCalculation bool `json:"for_calculation"`
IsExpiredGroup bool `json:"is_expired_group"`
ExpiredDaysLimit int `json:"expired_days_limit"`
MaxTrafficGBExpired int64 `json:"max_traffic_gb_expired,omitempty"`
SpeedLimit int `json:"speed_limit"`
MinTrafficGB int64 `json:"min_traffic_gb,omitempty"`
MaxTrafficGB int64 `json:"max_traffic_gb,omitempty"`
NodeCount int64 `json:"node_count,omitempty"`
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at"`
}
// GroupHistory 分组历史记录
GroupHistory {
Id int64 `json:"id"`
GroupMode string `json:"group_mode"`
TriggerType string `json:"trigger_type"`
TotalUsers int `json:"total_users"`
SuccessCount int `json:"success_count"`
FailedCount int `json:"failed_count"`
StartTime *int64 `json:"start_time,omitempty"`
EndTime *int64 `json:"end_time,omitempty"`
Operator string `json:"operator,omitempty"`
ErrorLog string `json:"error_log,omitempty"`
CreatedAt int64 `json:"created_at"`
}
// GroupHistoryDetail 分组历史详情
GroupHistoryDetail {
GroupHistory
ConfigSnapshot map[string]interface{} `json:"config_snapshot,omitempty"`
}
) )

View File

@ -15,10 +15,10 @@ Logger: # 日志配置
Level: debug # 日志级别: debug, info, warn, error, panic, fatal Level: debug # 日志级别: debug, info, warn, error, panic, fatal
MySQL: MySQL:
Addr: 103.150.215.44:3306 # host 网络模式; bridge 模式改为 mysql:3306 Addr: 154.12.35.103:3306 # host 网络模式; bridge 模式改为 mysql:3306
Username: root # MySQL用户名 Username: root # MySQL用户名
Password: jpcV41ppanel # MySQL密码与 .env MYSQL_ROOT_PASSWORD 一致 Password: jpcV41ppanel # MySQL密码与 .env MYSQL_ROOT_PASSWORD 一致
Dbname: hifast # MySQL数据库名 Dbname: ppanel # MySQL数据库名
Config: charset=utf8mb4&parseTime=true&loc=Asia%2FShanghai Config: charset=utf8mb4&parseTime=true&loc=Asia%2FShanghai
MaxIdleConns: 10 MaxIdleConns: 10
MaxOpenConns: 100 MaxOpenConns: 100

3
go.mod
View File

@ -94,6 +94,7 @@ require (
github.com/gin-contrib/sse v1.0.0 // indirect github.com/gin-contrib/sse v1.0.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/glog v1.2.0 // indirect github.com/golang/glog v1.2.0 // indirect
github.com/golang/mock v1.6.0 // indirect github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
@ -118,6 +119,7 @@ require (
github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mojocn/base64Captcha v1.3.8 // indirect
github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect
github.com/oschwald/maxminddb-golang v1.13.0 // indirect github.com/oschwald/maxminddb-golang v1.13.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect
@ -140,6 +142,7 @@ require (
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.13.0 // indirect golang.org/x/arch v0.13.0 // indirect
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect
golang.org/x/image v0.23.0 // indirect
golang.org/x/net v0.34.0 // indirect golang.org/x/net v0.34.0 // indirect
golang.org/x/sys v0.30.0 // indirect golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect golang.org/x/text v0.22.0 // indirect

31
go.sum
View File

@ -159,6 +159,8 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8= github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
@ -274,6 +276,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mojocn/base64Captcha v1.3.8 h1:rrN9BhCwXKS8ht1e21kvR3iTaMgf4qPC9sRoV52bqEg=
github.com/mojocn/base64Captcha v1.3.8/go.mod h1:QFZy927L8HVP3+VV5z2b1EAEiv1KxVJKZbAucVgLUy4=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@ -405,12 +409,17 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4= golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68=
golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -419,6 +428,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -434,7 +446,10 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -448,6 +463,10 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -466,14 +485,21 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -481,7 +507,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -499,6 +528,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -0,0 +1,28 @@
-- Purpose: Rollback node group management tables
-- Author: Tension
-- Date: 2025-02-23
-- Updated: 2025-03-06
-- NOTE: MySQL (unlike MariaDB) does not support ALTER TABLE ... DROP COLUMN/INDEX
-- IF EXISTS, so this rollback uses the same INFORMATION_SCHEMA + prepared-statement
-- guard pattern as the forward migration to stay idempotent.
-- ===== Remove system configuration entries =====
DELETE FROM `system` WHERE `category` = 'group' AND `key` IN ('enabled', 'mode', 'auto_create_group');
-- ===== Remove index and columns from subscribe table =====
SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND INDEX_NAME = 'idx_node_group_id');
SET @sql = IF(@index_exists > 0, 'ALTER TABLE `subscribe` DROP INDEX `idx_node_group_id`', 'SELECT ''Index idx_node_group_id does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND COLUMN_NAME = 'node_group_id');
SET @sql = IF(@column_exists > 0, 'ALTER TABLE `subscribe` DROP COLUMN `node_group_id`', 'SELECT ''Column node_group_id does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND COLUMN_NAME = 'node_group_ids');
SET @sql = IF(@column_exists > 0, 'ALTER TABLE `subscribe` DROP COLUMN `node_group_ids`', 'SELECT ''Column node_group_ids does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Remove index and columns from user_subscribe table =====
SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND INDEX_NAME = 'idx_node_group_id');
SET @sql = IF(@index_exists > 0, 'ALTER TABLE `user_subscribe` DROP INDEX `idx_node_group_id`', 'SELECT ''Index idx_node_group_id does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND COLUMN_NAME = 'node_group_id');
SET @sql = IF(@column_exists > 0, 'ALTER TABLE `user_subscribe` DROP COLUMN `node_group_id`', 'SELECT ''Column node_group_id does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- group_locked is added by the forward migration but was missing from this
-- rollback; drop it too so that down followed by up is fully repeatable.
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND COLUMN_NAME = 'group_locked');
SET @sql = IF(@column_exists > 0, 'ALTER TABLE `user_subscribe` DROP COLUMN `group_locked`', 'SELECT ''Column group_locked does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Remove columns from nodes table =====
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'nodes' AND COLUMN_NAME = 'node_group_ids');
SET @sql = IF(@column_exists > 0, 'ALTER TABLE `nodes` DROP COLUMN `node_group_ids`', 'SELECT ''Column node_group_ids does not exist''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Drop group tables (children before parents) =====
DROP TABLE IF EXISTS `group_history_detail`;
-- ===== Drop group_history table =====
DROP TABLE IF EXISTS `group_history`;
-- ===== Drop node_group table =====
DROP TABLE IF EXISTS `node_group`;

View File

@ -0,0 +1,130 @@
-- Purpose: Add node group management tables with multi-group support
-- Author: Tension
-- Date: 2025-02-23
-- Updated: 2025-03-06
-- NOTE(review): each CREATE TABLE below is preceded by DROP TABLE IF EXISTS,
-- so re-applying this up-migration destroys existing group data (and makes
-- the IF NOT EXISTS on the CREATE moot). Confirm the migration runner
-- guarantees single application before relying on this.
-- ===== Create node_group table =====
DROP TABLE IF EXISTS `node_group`;
CREATE TABLE IF NOT EXISTS `node_group` (
`id` bigint NOT NULL AUTO_INCREMENT,
`name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'Name',
`description` varchar(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'Group Description',
`sort` int NOT NULL DEFAULT '0' COMMENT 'Sort Order',
`for_calculation` tinyint(1) NOT NULL DEFAULT 1 COMMENT 'For Grouping Calculation: 0=false, 1=true',
`min_traffic_gb` bigint DEFAULT 0 COMMENT 'Minimum Traffic (GB) for this node group',
`max_traffic_gb` bigint DEFAULT 0 COMMENT 'Maximum Traffic (GB) for this node group',
`created_at` datetime(3) DEFAULT NULL COMMENT 'Create Time',
`updated_at` datetime(3) DEFAULT NULL COMMENT 'Update Time',
PRIMARY KEY (`id`),
KEY `idx_sort` (`sort`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Node Groups';
-- ===== Create group_history table =====
-- One row per group-calculation run; indexed on mode/trigger/state/time for
-- the admin history listing.
DROP TABLE IF EXISTS `group_history`;
CREATE TABLE IF NOT EXISTS `group_history` (
`id` bigint NOT NULL AUTO_INCREMENT,
`group_mode` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'Group Mode: average/subscribe/traffic',
`trigger_type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'Trigger Type: manual/auto/schedule',
`state` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'State: pending/running/completed/failed',
`total_users` int NOT NULL DEFAULT '0' COMMENT 'Total Users',
`success_count` int NOT NULL DEFAULT '0' COMMENT 'Success Count',
`failed_count` int NOT NULL DEFAULT '0' COMMENT 'Failed Count',
`start_time` datetime(3) DEFAULT NULL COMMENT 'Start Time',
`end_time` datetime(3) DEFAULT NULL COMMENT 'End Time',
`operator` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'Operator',
`error_message` text CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci COMMENT 'Error Message',
`created_at` datetime(3) DEFAULT NULL COMMENT 'Create Time',
PRIMARY KEY (`id`),
KEY `idx_group_mode` (`group_mode`),
KEY `idx_trigger_type` (`trigger_type`),
KEY `idx_state` (`state`),
KEY `idx_created_at` (`created_at`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Group Calculation History';
-- ===== Create group_history_detail table =====
-- Note: user_group_id column removed, using user_data JSON field instead
DROP TABLE IF EXISTS `group_history_detail`;
CREATE TABLE IF NOT EXISTS `group_history_detail` (
`id` bigint NOT NULL AUTO_INCREMENT,
`history_id` bigint NOT NULL COMMENT 'History ID',
`node_group_id` bigint NOT NULL COMMENT 'Node Group ID',
`user_count` int NOT NULL DEFAULT '0' COMMENT 'User Count',
`node_count` int NOT NULL DEFAULT '0' COMMENT 'Node Count',
`user_data` TEXT COMMENT 'User data JSON (id and email/phone)',
`created_at` datetime(3) DEFAULT NULL COMMENT 'Create Time',
PRIMARY KEY (`id`),
KEY `idx_history_id` (`history_id`),
KEY `idx_node_group_id` (`node_group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Group History Details';
-- MySQL has no ADD COLUMN/ADD INDEX IF NOT EXISTS, so each of the ALTERs
-- below first checks INFORMATION_SCHEMA and then executes the statement via
-- PREPARE/EXECUTE, making the whole script safe to re-run.
-- ===== Add columns to nodes table =====
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'nodes' AND COLUMN_NAME = 'node_group_ids');
SET @sql = IF(@column_exists = 0,
'ALTER TABLE `nodes` ADD COLUMN `node_group_ids` JSON COMMENT ''Node Group IDs (JSON array, multiple groups)''',
'SELECT ''Column node_group_ids already exists''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Add node_group_id column to user_subscribe table =====
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND COLUMN_NAME = 'node_group_id');
SET @sql = IF(@column_exists = 0,
'ALTER TABLE `user_subscribe` ADD COLUMN `node_group_id` bigint NOT NULL DEFAULT 0 COMMENT ''Node Group ID (single ID)''',
'SELECT ''Column node_group_id already exists''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Add index for user_subscribe.node_group_id =====
SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND INDEX_NAME = 'idx_node_group_id');
SET @sql = IF(@index_exists = 0,
'ALTER TABLE `user_subscribe` ADD INDEX `idx_node_group_id` (`node_group_id`)',
'SELECT ''Index idx_node_group_id already exists''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Add group_locked column to user_subscribe table =====
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND COLUMN_NAME = 'group_locked');
SET @sql = IF(@column_exists = 0,
'ALTER TABLE `user_subscribe` ADD COLUMN `group_locked` tinyint(1) NOT NULL DEFAULT 0 COMMENT ''Group Locked''',
'SELECT ''Column group_locked already exists in user_subscribe table''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Add columns to subscribe table =====
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND COLUMN_NAME = 'node_group_ids');
SET @sql = IF(@column_exists = 0,
'ALTER TABLE `subscribe` ADD COLUMN `node_group_ids` JSON COMMENT ''Node Group IDs (JSON array, multiple groups)''',
'SELECT ''Column node_group_ids already exists''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Add default node_group_id column to subscribe table =====
SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND COLUMN_NAME = 'node_group_id');
SET @sql = IF(@column_exists = 0,
'ALTER TABLE `subscribe` ADD COLUMN `node_group_id` bigint NOT NULL DEFAULT 0 COMMENT ''Default Node Group ID (single ID)''',
'SELECT ''Column node_group_id already exists in subscribe table''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Add index for subscribe.node_group_id =====
SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND INDEX_NAME = 'idx_node_group_id');
SET @sql = IF(@index_exists = 0,
'ALTER TABLE `subscribe` ADD INDEX `idx_node_group_id` (`node_group_id`)',
'SELECT ''Index idx_node_group_id already exists in subscribe table''');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- ===== Insert system configuration entries =====
-- ON DUPLICATE KEY keeps this re-runnable without duplicating config rows.
INSERT INTO `system` (`category`, `key`, `value`, `desc`) VALUES
('group', 'enabled', 'false', 'Group Management Enabled'),
('group', 'mode', 'average', 'Group Mode: average/subscribe/traffic'),
('group', 'auto_create_group', 'false', 'Auto-create user group when creating subscribe product')
ON DUPLICATE KEY UPDATE
`value` = VALUES(`value`),
`desc` = VALUES(`desc`);

View File

@ -0,0 +1,17 @@
-- Rollback: restore old verify configuration fields
-- Re-inserts the legacy EnableXxxVerify switches removed by the forward
-- captcha migration; ON DUPLICATE KEY keeps the script re-runnable.
INSERT INTO `system` (`category`, `key`, `value`, `type`, `desc`) VALUES
('verify', 'EnableLoginVerify', 'false', 'bool', 'is enable login verify'),
('verify', 'EnableRegisterVerify', 'false', 'bool', 'is enable register verify'),
('verify', 'EnableResetPasswordVerify', 'false', 'bool', 'is enable reset password verify')
ON DUPLICATE KEY UPDATE
`value` = VALUES(`value`),
`desc` = VALUES(`desc`);
-- Remove new captcha configuration fields
-- (exactly the keys introduced by the forward captcha migration)
DELETE FROM `system` WHERE `category` = 'verify' AND `key` IN (
'CaptchaType',
'EnableUserLoginCaptcha',
'EnableUserRegisterCaptcha',
'EnableAdminLoginCaptcha',
'EnableUserResetPasswordCaptcha'
);

View File

@ -0,0 +1,17 @@
-- Add new captcha configuration fields
-- Replaces the legacy EnableXxxVerify switches with per-scene captcha flags
-- plus a captcha provider selector; ON DUPLICATE KEY keeps it re-runnable.
INSERT INTO `system` (`category`, `key`, `value`, `type`, `desc`) VALUES
('verify', 'CaptchaType', 'local', 'string', 'Captcha type: local or turnstile'),
('verify', 'EnableUserLoginCaptcha', 'false', 'bool', 'Enable captcha for user login'),
('verify', 'EnableUserRegisterCaptcha', 'false', 'bool', 'Enable captcha for user registration'),
('verify', 'EnableAdminLoginCaptcha', 'false', 'bool', 'Enable captcha for admin login'),
('verify', 'EnableUserResetPasswordCaptcha', 'false', 'bool', 'Enable captcha for user reset password')
ON DUPLICATE KEY UPDATE
`value` = VALUES(`value`),
`desc` = VALUES(`desc`);
-- Remove old verify configuration fields
-- (superseded by the per-scene captcha keys inserted above)
DELETE FROM `system` WHERE `category` = 'verify' AND `key` IN (
'EnableLoginVerify',
'EnableRegisterVerify',
'EnableResetPasswordVerify'
);

View File

@ -0,0 +1,12 @@
-- Rollback: drop the expired-period traffic fields from user_subscribe
ALTER TABLE `user_subscribe`
DROP COLUMN `expired_upload`,
DROP COLUMN `expired_download`;
-- Rollback: drop the expired-group fields (and their index) from node_group
ALTER TABLE `node_group`
DROP INDEX `idx_is_expired_group`,
DROP COLUMN `speed_limit`,
DROP COLUMN `max_traffic_gb_expired`,
DROP COLUMN `expired_days_limit`,
DROP COLUMN `is_expired_group`;

View File

@ -0,0 +1,14 @@
-- Add expired-node-group related fields to the node_group table
ALTER TABLE `node_group`
ADD COLUMN `is_expired_group` tinyint(1) NOT NULL DEFAULT 0 COMMENT 'Is Expired Group: 0=normal, 1=expired group' AFTER `for_calculation`,
ADD COLUMN `expired_days_limit` int NOT NULL DEFAULT 7 COMMENT 'Expired days limit (days)' AFTER `is_expired_group`,
ADD COLUMN `max_traffic_gb_expired` bigint DEFAULT 0 COMMENT 'Max traffic for expired users (GB)' AFTER `expired_days_limit`,
ADD COLUMN `speed_limit` int NOT NULL DEFAULT 0 COMMENT 'Speed limit (KB/s)' AFTER `max_traffic_gb_expired`;
-- Index so expired groups can be filtered efficiently
ALTER TABLE `node_group` ADD INDEX `idx_is_expired_group` (`is_expired_group`);
-- Add expired-period traffic statistics fields to the user_subscribe table
ALTER TABLE `user_subscribe`
ADD COLUMN `expired_download` bigint NOT NULL DEFAULT 0 COMMENT 'Expired period download traffic (bytes)' AFTER `upload`,
ADD COLUMN `expired_upload` bigint NOT NULL DEFAULT 0 COMMENT 'Expired period upload traffic (bytes)' AFTER `expired_download`;

View File

@ -0,0 +1,6 @@
-- Purpose: Rollback traffic_limit rules from subscribe
-- Author: Claude Code
-- Date: 2026-03-12
-- ===== Remove traffic_limit column from subscribe table =====
-- MySQL (unlike MariaDB) does not support ALTER TABLE ... DROP COLUMN IF EXISTS,
-- so mirror the INFORMATION_SCHEMA + prepared-statement guard used by the
-- forward migration to keep this rollback idempotent.
SET @column_exists = (
SELECT COUNT(*)
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA = DATABASE()
AND TABLE_NAME = 'subscribe'
AND COLUMN_NAME = 'traffic_limit'
);
SET @sql = IF(
@column_exists > 0,
'ALTER TABLE `subscribe` DROP COLUMN `traffic_limit`',
'SELECT ''Column traffic_limit does not exist in subscribe table'''
);
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;

View File

@ -0,0 +1,22 @@
-- Purpose: Add traffic_limit rules to subscribe
-- Author: Claude Code
-- Date: 2026-03-12
-- ===== Add traffic_limit column to subscribe table =====
-- MySQL has no ADD COLUMN IF NOT EXISTS, so the column's existence is checked
-- via INFORMATION_SCHEMA and the ALTER runs through a prepared statement,
-- making this migration safe to re-run.
SET @column_exists = (
SELECT COUNT(*)
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA = DATABASE()
AND TABLE_NAME = 'subscribe'
AND COLUMN_NAME = 'traffic_limit'
);
SET @sql = IF(
@column_exists = 0,
'ALTER TABLE `subscribe` ADD COLUMN `traffic_limit` TEXT NULL COMMENT ''Traffic Limit Rules (JSON)'' AFTER `node_group_id`',
'SELECT ''Column traffic_limit already exists in subscribe table'''
);
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Create node group
func CreateNodeGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.CreateNodeGroupRequest
_ = c.ShouldBind(&req)
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewCreateNodeGroupLogic(c.Request.Context(), svcCtx)
err := l.CreateNodeGroup(&req)
result.HttpResult(c, nil, err)
}
}

View File

@ -0,0 +1,29 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Delete node group
func DeleteNodeGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.DeleteNodeGroupRequest
if err := c.ShouldBind(&req); err != nil {
result.ParamErrorResult(c, err)
return
}
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewDeleteNodeGroupLogic(c.Request.Context(), svcCtx)
err := l.DeleteNodeGroup(&req)
result.HttpResult(c, nil, err)
}
}

View File

@ -0,0 +1,36 @@
package group
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Export group result
func ExportGroupResultHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.ExportGroupResultRequest
_ = c.ShouldBind(&req)
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewExportGroupResultLogic(c.Request.Context(), svcCtx)
data, filename, err := l.ExportGroupResult(&req)
if err != nil {
result.HttpResult(c, nil, err)
return
}
// 设置响应头
c.Header("Content-Type", "text/csv")
c.Header("Content-Disposition", "attachment; filename="+filename)
c.Data(http.StatusOK, "text/csv", data)
}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Get group config
func GetGroupConfigHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.GetGroupConfigRequest
_ = c.ShouldBind(&req)
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewGetGroupConfigLogic(c.Request.Context(), svcCtx)
resp, err := l.GetGroupConfig(&req)
result.HttpResult(c, resp, err)
}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Get group history detail
func GetGroupHistoryDetailHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.GetGroupHistoryDetailRequest
_ = c.ShouldBind(&req)
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewGetGroupHistoryDetailLogic(c.Request.Context(), svcCtx)
resp, err := l.GetGroupHistoryDetail(&req)
result.HttpResult(c, resp, err)
}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Get group history
func GetGroupHistoryHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.GetGroupHistoryRequest
_ = c.ShouldBind(&req)
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewGetGroupHistoryLogic(c.Request.Context(), svcCtx)
resp, err := l.GetGroupHistory(&req)
result.HttpResult(c, resp, err)
}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// Get node group list
func GetNodeGroupListHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
var req types.GetNodeGroupListRequest
_ = c.ShouldBind(&req)
validateErr := svcCtx.Validate(&req)
if validateErr != nil {
result.ParamErrorResult(c, validateErr)
return
}
l := group.NewGetNodeGroupListLogic(c.Request.Context(), svcCtx)
resp, err := l.GetNodeGroupList(&req)
result.HttpResult(c, resp, err)
}
}

View File

@ -0,0 +1,18 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/pkg/result"
)
// Get recalculation status
func GetRecalculationStatusHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
return func(c *gin.Context) {
l := group.NewGetRecalculationStatusLogic(c.Request.Context(), svcCtx)
resp, err := l.GetRecalculationStatus()
result.HttpResult(c, resp, err)
}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// GetSubscribeGroupMappingHandler returns the gin handler that serves the
// subscribe-to-group mapping for the admin API.
func GetSubscribeGroupMappingHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.GetSubscribeGroupMappingRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := group.NewGetSubscribeGroupMappingLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.GetSubscribeGroupMapping(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// PreviewUserNodesHandler returns the gin handler that previews the nodes a
// user would receive under the current grouping rules.
func PreviewUserNodesHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.PreviewUserNodesRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := group.NewPreviewUserNodesLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.PreviewUserNodes(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// RecalculateGroupHandler returns the gin handler that triggers a group
// recalculation. The logic returns no payload, only success or error.
func RecalculateGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.RecalculateGroupRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := group.NewRecalculateGroupLogic(ctx.Request.Context(), svcCtx)
		result.HttpResult(ctx, nil, logic.RecalculateGroup(&req))
	}
}

View File

@ -0,0 +1,17 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/pkg/result"
)
// ResetGroupsHandler returns the gin handler that resets all node groups.
// The endpoint takes no parameters and returns no payload.
func ResetGroupsHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		logic := group.NewResetGroupsLogic(ctx.Request.Context(), svcCtx)
		result.HttpResult(ctx, nil, logic.ResetGroups())
	}
}

View File

@ -0,0 +1,26 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// UpdateGroupConfigHandler returns the gin handler that updates the group
// configuration. The logic returns no payload, only success or error.
func UpdateGroupConfigHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.UpdateGroupConfigRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := group.NewUpdateGroupConfigLogic(ctx.Request.Context(), svcCtx)
		result.HttpResult(ctx, nil, logic.UpdateGroupConfig(&req))
	}
}

View File

@ -0,0 +1,33 @@
package group
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// UpdateNodeGroupHandler returns the gin handler that updates a node group.
// URI parameters and body fields are both bound into the request; unlike
// the sibling handlers, bind failures here abort immediately.
func UpdateNodeGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.UpdateNodeGroupRequest
		// Bind URI params first, then the request body.
		if err := ctx.ShouldBindUri(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		if err := ctx.ShouldBind(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := group.NewUpdateNodeGroupLogic(ctx.Request.Context(), svcCtx)
		result.HttpResult(ctx, nil, logic.UpdateNodeGroup(&req))
	}
}

View File

@ -0,0 +1,18 @@
package admin
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth/admin"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/pkg/result"
)
// AdminGenerateCaptchaHandler returns the gin handler that generates a
// captcha challenge for the admin login flow. Takes no parameters.
func AdminGenerateCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		logic := admin.NewAdminGenerateCaptchaLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.AdminGenerateCaptcha()
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -0,0 +1,30 @@
package admin
import (
"github.com/gin-gonic/gin"
adminLogic "github.com/perfect-panel/server/internal/logic/auth/admin"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// AdminLoginHandler returns the gin handler for administrator login. The
// client IP and user agent are attached to the request before validation.
func AdminLoginHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.UserLoginRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		// Attach client metadata to the request.
		req.IP = ctx.ClientIP()
		req.UserAgent = ctx.Request.UserAgent()
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := adminLogic.NewAdminLoginLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.AdminLogin(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -0,0 +1,29 @@
package admin
import (
"github.com/gin-gonic/gin"
adminLogic "github.com/perfect-panel/server/internal/logic/auth/admin"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// AdminResetPasswordHandler returns the gin handler for the administrator
// password reset endpoint.
//
// The client IP and user agent are attached to the request before
// validation — the original set them after the validation check, which was
// inconsistent with AdminLoginHandler; the order has no behavioral effect
// on the success path but keeps the two handlers parallel.
func AdminResetPasswordHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(c *gin.Context) {
		var req types.ResetPasswordRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = c.ShouldBind(&req)
		// get client ip
		req.IP = c.ClientIP()
		req.UserAgent = c.Request.UserAgent()
		validateErr := svcCtx.Validate(&req)
		if validateErr != nil {
			result.ParamErrorResult(c, validateErr)
			return
		}
		l := adminLogic.NewAdminResetPasswordLogic(c.Request.Context(), svcCtx)
		resp, err := l.AdminResetPassword(&req)
		result.HttpResult(c, resp, err)
	}
}

View File

@ -0,0 +1,26 @@
package admin
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth/admin"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// AdminSliderVerifyCaptchaHandler returns the gin handler that verifies a
// slider-captcha answer for the admin login flow.
func AdminSliderVerifyCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.SliderVerifyCaptchaRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := admin.NewAdminSliderVerifyCaptchaLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.AdminSliderVerifyCaptcha(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -0,0 +1,18 @@
package auth
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/pkg/result"
)
// GenerateCaptchaHandler returns the gin handler that generates a captcha
// challenge for the public auth flow. Takes no parameters.
func GenerateCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		logic := auth.NewGenerateCaptchaLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.GenerateCaptcha()
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -1,16 +1,11 @@
package auth package auth
import ( import (
"time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result" "github.com/perfect-panel/server/pkg/result"
"github.com/perfect-panel/server/pkg/turnstile"
"github.com/perfect-panel/server/pkg/xerr"
"github.com/pkg/errors"
) )
// Reset password // Reset password
@ -25,17 +20,8 @@ func ResetPasswordHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
} }
// get client ip // get client ip
req.IP = c.ClientIP() req.IP = c.ClientIP()
if svcCtx.Config.Verify.ResetPasswordVerify { req.UserAgent = c.Request.UserAgent()
verifyTurns := turnstile.New(turnstile.Config{
Secret: svcCtx.Config.Verify.TurnstileSecret,
Timeout: 3 * time.Second,
})
if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify {
err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify)
result.HttpResult(c, nil, err)
return
}
}
l := auth.NewResetPasswordLogic(c.Request.Context(), svcCtx) l := auth.NewResetPasswordLogic(c.Request.Context(), svcCtx)
resp, err := l.ResetPassword(&req) resp, err := l.ResetPassword(&req)
result.HttpResult(c, resp, err) result.HttpResult(c, resp, err)

View File

@ -0,0 +1,26 @@
package auth
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// SliderVerifyCaptchaHandler returns the gin handler that verifies a
// slider-captcha answer for the public auth flow.
func SliderVerifyCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.SliderVerifyCaptchaRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := auth.NewSliderVerifyCaptchaLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.SliderVerifyCaptcha(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -0,0 +1,26 @@
package auth
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// TelephoneRegisterHandler returns the gin handler for telephone-based user
// registration.
//
// NOTE(review): unlike the other register handlers in this package, this
// one does not attach req.IP / req.UserAgent — confirm that is intentional.
func TelephoneRegisterHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.TelephoneRegisterRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := auth.NewTelephoneRegisterLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.TelephoneRegister(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -1,14 +1,13 @@
package auth package auth
import ( import (
"time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/captcha"
"github.com/perfect-panel/server/pkg/result" "github.com/perfect-panel/server/pkg/result"
"github.com/perfect-panel/server/pkg/turnstile" "github.com/perfect-panel/server/pkg/tool"
"github.com/perfect-panel/server/pkg/xerr" "github.com/perfect-panel/server/pkg/xerr"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -25,17 +24,44 @@ func TelephoneResetPasswordHandler(svcCtx *svc.ServiceContext) func(c *gin.Conte
} }
// get client ip // get client ip
req.IP = c.ClientIP() req.IP = c.ClientIP()
if svcCtx.Config.Verify.ResetPasswordVerify {
verifyTurns := turnstile.New(turnstile.Config{ // Get verify config from database
Secret: svcCtx.Config.Verify.TurnstileSecret, verifyCfg, err := svcCtx.SystemModel.GetVerifyConfig(c.Request.Context())
Timeout: 3 * time.Second, if err != nil {
result.HttpResult(c, nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "get verify config failed: %v", err))
return
}
var config struct {
CaptchaType string `json:"captcha_type"`
EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"`
TurnstileSecret string `json:"turnstile_secret"`
}
tool.SystemConfigSliceReflectToStruct(verifyCfg, &config)
// Verify captcha if enabled
if config.EnableUserResetPasswordCaptcha {
captchaService := captcha.NewService(captcha.Config{
Type: captcha.CaptchaType(config.CaptchaType),
TurnstileSecret: config.TurnstileSecret,
RedisClient: svcCtx.Redis,
}) })
if verify, err := verifyTurns.Verify(c.Request.Context(), req.CfToken, req.IP); err != nil || !verify {
err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify) var token, code string
result.HttpResult(c, nil, err) if config.CaptchaType == "turnstile" {
token = req.CfToken
} else {
token = req.CaptchaId
code = req.CaptchaCode
}
verified, err := captchaService.Verify(c.Request.Context(), token, code, req.IP)
if err != nil || !verified {
result.HttpResult(c, nil, errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "captcha verification failed: %v", err))
return return
} }
} }
l := auth.NewTelephoneResetPasswordLogic(c, svcCtx) l := auth.NewTelephoneResetPasswordLogic(c, svcCtx)
resp, err := l.TelephoneResetPassword(&req) resp, err := l.TelephoneResetPassword(&req)
result.HttpResult(c, resp, err) result.HttpResult(c, resp, err)

View File

@ -1,16 +1,11 @@
package auth package auth
import ( import (
"time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result" "github.com/perfect-panel/server/pkg/result"
"github.com/perfect-panel/server/pkg/turnstile"
"github.com/perfect-panel/server/pkg/xerr"
"github.com/pkg/errors"
) )
// User Telephone register // User Telephone register
@ -26,17 +21,7 @@ func TelephoneUserRegisterHandler(svcCtx *svc.ServiceContext) func(c *gin.Contex
// get client ip // get client ip
req.IP = c.ClientIP() req.IP = c.ClientIP()
req.UserAgent = c.Request.UserAgent() req.UserAgent = c.Request.UserAgent()
if svcCtx.Config.Verify.RegisterVerify {
verifyTurns := turnstile.New(turnstile.Config{
Secret: svcCtx.Config.Verify.TurnstileSecret,
Timeout: 3 * time.Second,
})
if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify {
err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify)
result.HttpResult(c, nil, err)
return
}
}
l := auth.NewTelephoneUserRegisterLogic(c.Request.Context(), svcCtx) l := auth.NewTelephoneUserRegisterLogic(c.Request.Context(), svcCtx)
resp, err := l.TelephoneUserRegister(&req) resp, err := l.TelephoneUserRegister(&req)
result.HttpResult(c, resp, err) result.HttpResult(c, resp, err)

View File

@ -1,16 +1,11 @@
package auth package auth
import ( import (
"time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result" "github.com/perfect-panel/server/pkg/result"
"github.com/perfect-panel/server/pkg/turnstile"
"github.com/perfect-panel/server/pkg/xerr"
"github.com/pkg/errors"
) )
// User login // User login
@ -21,17 +16,7 @@ func UserLoginHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
// get client ip // get client ip
req.IP = c.ClientIP() req.IP = c.ClientIP()
req.UserAgent = c.Request.UserAgent() req.UserAgent = c.Request.UserAgent()
if svcCtx.Config.Verify.LoginVerify && !svcCtx.Config.Debug {
verifyTurns := turnstile.New(turnstile.Config{
Secret: svcCtx.Config.Verify.TurnstileSecret,
Timeout: 3 * time.Second,
})
if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify {
err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify)
result.HttpResult(c, nil, err)
return
}
}
validateErr := svcCtx.Validate(&req) validateErr := svcCtx.Validate(&req)
if validateErr != nil { if validateErr != nil {
result.ParamErrorResult(c, validateErr) result.ParamErrorResult(c, validateErr)

View File

@ -1,16 +1,11 @@
package auth package auth
import ( import (
"time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/logic/auth"
"github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result" "github.com/perfect-panel/server/pkg/result"
"github.com/perfect-panel/server/pkg/turnstile"
"github.com/perfect-panel/server/pkg/xerr"
"github.com/pkg/errors"
) )
// User register // User register
@ -21,16 +16,7 @@ func UserRegisterHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
// get client ip // get client ip
req.IP = c.ClientIP() req.IP = c.ClientIP()
req.UserAgent = c.Request.UserAgent() req.UserAgent = c.Request.UserAgent()
if svcCtx.Config.Verify.RegisterVerify {
verifyTurns := turnstile.New(turnstile.Config{
Secret: svcCtx.Config.Verify.TurnstileSecret,
Timeout: 3 * time.Second,
})
if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify {
result.HttpResult(c, nil, errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "verify error: %v", err.Error()))
return
}
}
validateErr := svcCtx.Validate(&req) validateErr := svcCtx.Validate(&req)
if validateErr != nil { if validateErr != nil {
result.ParamErrorResult(c, validateErr) result.ParamErrorResult(c, validateErr)

View File

@ -0,0 +1,26 @@
package user
import (
"github.com/gin-gonic/gin"
"github.com/perfect-panel/server/internal/logic/public/user"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/result"
)
// GetUserTrafficStatsHandler returns the gin handler that serves a user's
// traffic statistics.
func GetUserTrafficStatsHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) {
	return func(ctx *gin.Context) {
		var req types.GetUserTrafficStatsRequest
		// Bind errors are intentionally ignored; Validate reports any
		// problems with the bound request below.
		_ = ctx.ShouldBind(&req)
		if err := svcCtx.Validate(&req); err != nil {
			result.ParamErrorResult(ctx, err)
			return
		}
		logic := user.NewGetUserTrafficStatsLogic(ctx.Request.Context(), svcCtx)
		resp, err := logic.GetUserTrafficStats(&req)
		result.HttpResult(ctx, resp, err)
	}
}

View File

@ -12,6 +12,7 @@ import (
adminConsole "github.com/perfect-panel/server/internal/handler/admin/console" adminConsole "github.com/perfect-panel/server/internal/handler/admin/console"
adminCoupon "github.com/perfect-panel/server/internal/handler/admin/coupon" adminCoupon "github.com/perfect-panel/server/internal/handler/admin/coupon"
adminDocument "github.com/perfect-panel/server/internal/handler/admin/document" adminDocument "github.com/perfect-panel/server/internal/handler/admin/document"
adminGroup "github.com/perfect-panel/server/internal/handler/admin/group"
adminLog "github.com/perfect-panel/server/internal/handler/admin/log" adminLog "github.com/perfect-panel/server/internal/handler/admin/log"
adminMarketing "github.com/perfect-panel/server/internal/handler/admin/marketing" adminMarketing "github.com/perfect-panel/server/internal/handler/admin/marketing"
adminOrder "github.com/perfect-panel/server/internal/handler/admin/order" adminOrder "github.com/perfect-panel/server/internal/handler/admin/order"
@ -24,6 +25,7 @@ import (
adminTool "github.com/perfect-panel/server/internal/handler/admin/tool" adminTool "github.com/perfect-panel/server/internal/handler/admin/tool"
adminUser "github.com/perfect-panel/server/internal/handler/admin/user" adminUser "github.com/perfect-panel/server/internal/handler/admin/user"
auth "github.com/perfect-panel/server/internal/handler/auth" auth "github.com/perfect-panel/server/internal/handler/auth"
authAdmin "github.com/perfect-panel/server/internal/handler/auth/admin"
authOauth "github.com/perfect-panel/server/internal/handler/auth/oauth" authOauth "github.com/perfect-panel/server/internal/handler/auth/oauth"
common "github.com/perfect-panel/server/internal/handler/common" common "github.com/perfect-panel/server/internal/handler/common"
publicAnnouncement "github.com/perfect-panel/server/internal/handler/public/announcement" publicAnnouncement "github.com/perfect-panel/server/internal/handler/public/announcement"
@ -189,6 +191,53 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
adminDocumentGroupRouter.GET("/list", adminDocument.GetDocumentListHandler(serverCtx)) adminDocumentGroupRouter.GET("/list", adminDocument.GetDocumentListHandler(serverCtx))
} }
adminGroupGroupRouter := router.Group("/v1/admin/group")
adminGroupGroupRouter.Use(middleware.AuthMiddleware(serverCtx))
{
// Get group config
adminGroupGroupRouter.GET("/config", adminGroup.GetGroupConfigHandler(serverCtx))
// Update group config
adminGroupGroupRouter.PUT("/config", adminGroup.UpdateGroupConfigHandler(serverCtx))
// Export group result
adminGroupGroupRouter.GET("/export", adminGroup.ExportGroupResultHandler(serverCtx))
// Get group history
adminGroupGroupRouter.GET("/history", adminGroup.GetGroupHistoryHandler(serverCtx))
// Get group history detail
adminGroupGroupRouter.GET("/history/detail", adminGroup.GetGroupHistoryDetailHandler(serverCtx))
// Create node group
adminGroupGroupRouter.POST("/node", adminGroup.CreateNodeGroupHandler(serverCtx))
// Update node group
adminGroupGroupRouter.PUT("/node", adminGroup.UpdateNodeGroupHandler(serverCtx))
// Delete node group
adminGroupGroupRouter.DELETE("/node", adminGroup.DeleteNodeGroupHandler(serverCtx))
// Get node group list
adminGroupGroupRouter.GET("/node/list", adminGroup.GetNodeGroupListHandler(serverCtx))
// Preview user nodes
adminGroupGroupRouter.GET("/preview", adminGroup.PreviewUserNodesHandler(serverCtx))
// Recalculate group
adminGroupGroupRouter.POST("/recalculate", adminGroup.RecalculateGroupHandler(serverCtx))
// Get recalculation status
adminGroupGroupRouter.GET("/recalculation/status", adminGroup.GetRecalculationStatusHandler(serverCtx))
// Reset all groups
adminGroupGroupRouter.POST("/reset", adminGroup.ResetGroupsHandler(serverCtx))
// Get subscribe group mapping
adminGroupGroupRouter.GET("/subscribe/mapping", adminGroup.GetSubscribeGroupMappingHandler(serverCtx))
}
adminLogGroupRouter := router.Group("/v1/admin/log") adminLogGroupRouter := router.Group("/v1/admin/log")
adminLogGroupRouter.Use(middleware.AuthMiddleware(serverCtx)) adminLogGroupRouter.Use(middleware.AuthMiddleware(serverCtx))
@ -272,6 +321,9 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
// Query quota task pre-count // Query quota task pre-count
adminMarketingGroupRouter.POST("/quota/pre-count", adminMarketing.QueryQuotaTaskPreCountHandler(serverCtx)) adminMarketingGroupRouter.POST("/quota/pre-count", adminMarketing.QueryQuotaTaskPreCountHandler(serverCtx))
// Query quota task status
adminMarketingGroupRouter.POST("/quota/status", adminMarketing.QueryQuotaTaskStatusHandler(serverCtx))
} }
adminOrderGroupRouter := router.Group("/v1/admin/order") adminOrderGroupRouter := router.Group("/v1/admin/order")
@ -659,6 +711,12 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
authGroupRouter.Use(middleware.DeviceMiddleware(serverCtx)) authGroupRouter.Use(middleware.DeviceMiddleware(serverCtx))
{ {
// Generate captcha
authGroupRouter.POST("/captcha/generate", auth.GenerateCaptchaHandler(serverCtx))
// Verify slider captcha
authGroupRouter.POST("/captcha/slider/verify", auth.SliderVerifyCaptchaHandler(serverCtx))
// Check user is exist // Check user is exist
authGroupRouter.GET("/check", auth.CheckUserHandler(serverCtx)) authGroupRouter.GET("/check", auth.CheckUserHandler(serverCtx))
@ -681,15 +739,32 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
authGroupRouter.POST("/register", auth.UserRegisterHandler(serverCtx)) authGroupRouter.POST("/register", auth.UserRegisterHandler(serverCtx))
// User Telephone register // User Telephone register
authGroupRouter.POST("/register/telephone", auth.TelephoneUserRegisterHandler(serverCtx)) authGroupRouter.POST("/register/telephone", auth.TelephoneRegisterHandler(serverCtx))
// Reset password // Reset password
authGroupRouter.POST("/reset", auth.ResetPasswordHandler(serverCtx)) authGroupRouter.POST("/reset", auth.ResetPasswordHandler(serverCtx))
// Reset password // Reset password by telephone
authGroupRouter.POST("/reset/telephone", auth.TelephoneResetPasswordHandler(serverCtx)) authGroupRouter.POST("/reset/telephone", auth.TelephoneResetPasswordHandler(serverCtx))
} }
authAdminGroupRouter := router.Group("/v1/auth/admin")
authAdminGroupRouter.Use(middleware.DeviceMiddleware(serverCtx))
{
// Generate captcha
authAdminGroupRouter.POST("/captcha/generate", authAdmin.AdminGenerateCaptchaHandler(serverCtx))
// Verify slider captcha
authAdminGroupRouter.POST("/captcha/slider/verify", authAdmin.AdminSliderVerifyCaptchaHandler(serverCtx))
// Admin login
authAdminGroupRouter.POST("/login", authAdmin.AdminLoginHandler(serverCtx))
// Admin reset password
authAdminGroupRouter.POST("/reset", authAdmin.AdminResetPasswordHandler(serverCtx))
}
authOauthGroupRouter := router.Group("/v1/auth/oauth") authOauthGroupRouter := router.Group("/v1/auth/oauth")
{ {
@ -742,6 +817,15 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
// Submit contact info // Submit contact info
commonGroupRouter.POST("/contact", common.SubmitContactHandler(serverCtx)) commonGroupRouter.POST("/contact", common.SubmitContactHandler(serverCtx))
// Report log message
commonGroupRouter.POST("/log/report", common.ReportLogMessageHandler(serverCtx))
// Check verification code (legacy v1)
commonGroupRouter.POST("/check_code", auth.CheckCodeLegacyV1Handler(serverCtx))
// Check verification code (legacy v2, consume code)
commonGroupRouter.POST("/check_code/v2", auth.CheckCodeLegacyV2Handler(serverCtx))
} }
publicAnnouncementGroupRouter := router.Group("/v1/public/announcement") publicAnnouncementGroupRouter := router.Group("/v1/public/announcement")
@ -857,6 +941,9 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
// Get user subscribe node info // Get user subscribe node info
publicSubscribeGroupRouter.GET("/node/list", publicSubscribe.QueryUserSubscribeNodeListHandler(serverCtx)) publicSubscribeGroupRouter.GET("/node/list", publicSubscribe.QueryUserSubscribeNodeListHandler(serverCtx))
// Get subscribe group list
publicSubscribeGroupRouter.GET("/group/list", publicSubscribe.QuerySubscribeGroupListHandler(serverCtx))
} }
publicTicketGroupRouter := router.Group("/v1/public/ticket") publicTicketGroupRouter := router.Group("/v1/public/ticket")
@ -891,11 +978,11 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
// Get Agent Downloads // Get Agent Downloads
publicUserGroupRouter.GET("/agent_downloads", publicUser.GetAgentDownloadsHandler(serverCtx)) publicUserGroupRouter.GET("/agent_downloads", publicUser.GetAgentDownloadsHandler(serverCtx))
publicUserGroupRouter.GET("/agent/downloads", publicUser.GetAgentDownloadsHandler(serverCtx)) publicUserGroupRouter.GET("/agent/downloads", publicUser.GetAgentDownloadsHandler(serverCtx)) // alias: backward-compat
// Get Agent Realtime // Get Agent Realtime
publicUserGroupRouter.GET("/agent_realtime", publicUser.GetAgentRealtimeHandler(serverCtx)) publicUserGroupRouter.GET("/agent_realtime", publicUser.GetAgentRealtimeHandler(serverCtx))
publicUserGroupRouter.GET("/agent/realtime", publicUser.GetAgentRealtimeHandler(serverCtx)) publicUserGroupRouter.GET("/agent/realtime", publicUser.GetAgentRealtimeHandler(serverCtx)) // alias: backward-compat
// Query User Balance Log // Query User Balance Log
publicUserGroupRouter.GET("/balance_log", publicUser.QueryUserBalanceLogHandler(serverCtx)) publicUserGroupRouter.GET("/balance_log", publicUser.QueryUserBalanceLogHandler(serverCtx))
@ -944,11 +1031,11 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
// Get Invite Sales // Get Invite Sales
publicUserGroupRouter.GET("/invite_sales", publicUser.GetInviteSalesHandler(serverCtx)) publicUserGroupRouter.GET("/invite_sales", publicUser.GetInviteSalesHandler(serverCtx))
publicUserGroupRouter.GET("/invite/sales", publicUser.GetInviteSalesHandler(serverCtx)) publicUserGroupRouter.GET("/invite/sales", publicUser.GetInviteSalesHandler(serverCtx)) // alias: backward-compat
// Get User Invite Stats // Get User Invite Stats
publicUserGroupRouter.GET("/invite_stats", publicUser.GetUserInviteStatsHandler(serverCtx)) publicUserGroupRouter.GET("/invite_stats", publicUser.GetUserInviteStatsHandler(serverCtx))
publicUserGroupRouter.GET("/invite/stats", publicUser.GetUserInviteStatsHandler(serverCtx)) publicUserGroupRouter.GET("/invite/stats", publicUser.GetUserInviteStatsHandler(serverCtx)) // alias: backward-compat
// Get Login Log // Get Login Log
publicUserGroupRouter.GET("/login_log", publicUser.GetLoginLogHandler(serverCtx)) publicUserGroupRouter.GET("/login_log", publicUser.GetLoginLogHandler(serverCtx))
@ -980,6 +1067,9 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
// Reset User Subscribe Token // Reset User Subscribe Token
publicUserGroupRouter.PUT("/subscribe_token", publicUser.ResetUserSubscribeTokenHandler(serverCtx)) publicUserGroupRouter.PUT("/subscribe_token", publicUser.ResetUserSubscribeTokenHandler(serverCtx))
// Get User Traffic Statistics
publicUserGroupRouter.GET("/traffic_stats", publicUser.GetUserTrafficStatsHandler(serverCtx))
// Unbind Device // Unbind Device
publicUserGroupRouter.PUT("/unbind_device", publicUser.UnbindDeviceHandler(serverCtx)) publicUserGroupRouter.PUT("/unbind_device", publicUser.UnbindDeviceHandler(serverCtx))
@ -1030,10 +1120,10 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) {
serverGroupRouter.GET("/user", server.GetServerUserListHandler(serverCtx)) serverGroupRouter.GET("/user", server.GetServerUserListHandler(serverCtx))
} }
serverV2GroupRouter := router.Group("/v2/server") serverGroupRouterV2 := router.Group("/v2/server")
{ {
// Get Server Protocol Config // Get Server Protocol Config
serverV2GroupRouter.GET("/:server_id", server.QueryServerProtocolConfigHandler(serverCtx)) serverGroupRouterV2.GET("/:server_id", server.QueryServerProtocolConfigHandler(serverCtx))
} }
} }

View File

@ -0,0 +1,81 @@
package group
import (
"context"
"errors"
"time"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
)
// CreateNodeGroupLogic carries the request-scoped dependencies used when
// creating a node group.
type CreateNodeGroupLogic struct {
	logger.Logger                      // context-bound logger (see NewCreateNodeGroupLogic)
	ctx    context.Context             // request context
	svcCtx *svc.ServiceContext         // shared service dependencies (DB, config, ...)
}
// NewCreateNodeGroupLogic constructs a CreateNodeGroupLogic bound to the
// given request context and service context.
func NewCreateNodeGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CreateNodeGroupLogic {
	l := &CreateNodeGroupLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// CreateNodeGroup creates a new node group from the request.
//
// At most one "expired" node group may exist system-wide: when the request
// marks the group as expired, creation is rejected if one already exists,
// ForCalculation is forced to false, and ExpiredDaysLimit defaults to 7
// days when not supplied.
//
// NOTE(review): the uniqueness check and the insert are separate
// statements, so two concurrent requests could both pass the check —
// confirm whether a database constraint backs the single-expired-group
// invariant.
func (l *CreateNodeGroupLogic) CreateNodeGroup(req *types.CreateNodeGroupRequest) error {
	// Hoist the nil-safe deref; the original repeated it in two places.
	isExpiredGroup := req.IsExpiredGroup != nil && *req.IsExpiredGroup

	// Validate: the system may hold only one expired node group.
	if isExpiredGroup {
		var count int64
		err := l.svcCtx.DB.Model(&group.NodeGroup{}).
			Where("is_expired_group = ?", true).
			Count(&count).Error
		if err != nil {
			// Use the embedded context-bound logger (not the package-level
			// logger) so log lines retain their request context.
			l.Errorf("failed to check expired group count: %v", err)
			return err
		}
		if count > 0 {
			return errors.New("system already has an expired node group, cannot create multiple")
		}
	}

	// Build the node group; one timestamp so CreatedAt == UpdatedAt.
	now := time.Now()
	nodeGroup := &group.NodeGroup{
		Name:                req.Name,
		Description:         req.Description,
		Sort:                req.Sort,
		ForCalculation:      req.ForCalculation,
		IsExpiredGroup:      req.IsExpiredGroup,
		MaxTrafficGBExpired: req.MaxTrafficGBExpired,
		MinTrafficGB:        req.MinTrafficGB,
		MaxTrafficGB:        req.MaxTrafficGB,
		CreatedAt:           now,
		UpdatedAt:           now,
	}

	// Apply defaults specific to the expired node group.
	if isExpiredGroup {
		// An expired group never participates in group calculation.
		falseValue := false
		nodeGroup.ForCalculation = &falseValue
		if req.ExpiredDaysLimit != nil {
			nodeGroup.ExpiredDaysLimit = *req.ExpiredDaysLimit
		} else {
			nodeGroup.ExpiredDaysLimit = 7 // default: 7 days
		}
		if req.SpeedLimit != nil {
			nodeGroup.SpeedLimit = *req.SpeedLimit
		}
	}

	if err := l.svcCtx.DB.Create(nodeGroup).Error; err != nil {
		l.Errorf("failed to create node group: %v", err)
		return err
	}
	l.Infof("created node group: node_group_id=%d", nodeGroup.Id)
	return nil
}

View File

@ -0,0 +1,62 @@
package group
import (
"context"
"errors"
"fmt"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/node"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"gorm.io/gorm"
)
// DeleteNodeGroupLogic carries the request context and shared service
// dependencies for the admin "delete node group" endpoint.
type DeleteNodeGroupLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewDeleteNodeGroupLogic builds a DeleteNodeGroupLogic with a logger bound
// to the given request context.
func NewDeleteNodeGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteNodeGroupLogic {
	l := &DeleteNodeGroupLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// DeleteNodeGroup deletes a node group by id. Deletion is refused while any
// node still references the group in its node_group_ids JSON array.
//
// NOTE(review): the node-count check runs outside the delete transaction, so
// a node attached between check and delete would not be detected — confirm
// whether that window is acceptable.
func (l *DeleteNodeGroupLogic) DeleteNodeGroup(req *types.DeleteNodeGroupRequest) error {
	// Look up the node group; not-found becomes a friendly error.
	var nodeGroup group.NodeGroup
	if err := l.svcCtx.DB.Where("id = ?", req.Id).First(&nodeGroup).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return errors.New("node group not found")
		}
		logger.Errorf("failed to find node group: %v", err)
		return err
	}
	// node_group_ids is a JSON array column, so membership is tested with
	// MySQL JSON_CONTAINS against a one-element array literal.
	var nodeCount int64
	if err := l.svcCtx.DB.Model(&node.Node{}).Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", nodeGroup.Id)).Count(&nodeCount).Error; err != nil {
		logger.Errorf("failed to count nodes in group: %v", err)
		return err
	}
	if nodeCount > 0 {
		return fmt.Errorf("cannot delete group with %d associated nodes, please migrate nodes first", nodeCount)
	}
	// Delete inside a GORM transaction.
	return l.svcCtx.DB.Transaction(func(tx *gorm.DB) error {
		if err := tx.Where("id = ?", req.Id).Delete(&group.NodeGroup{}).Error; err != nil {
			logger.Errorf("failed to delete node group: %v", err)
			return err // returning an error rolls back automatically
		}
		logger.Infof("deleted node group: id=%d", nodeGroup.Id)
		return nil // returning nil commits automatically
	})
}

View File

@ -0,0 +1,129 @@
package group
import (
"bytes"
"context"
"encoding/csv"
"fmt"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/user"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
)
// ExportGroupResultLogic carries the request context and shared service
// dependencies for the "export grouping result" endpoint.
type ExportGroupResultLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewExportGroupResultLogic builds an ExportGroupResultLogic with a logger
// bound to the given request context.
func NewExportGroupResultLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ExportGroupResultLogic {
	l := &ExportGroupResultLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// ExportGroupResult exports grouping results as CSV.
// Returns: CSV payload (UTF-8 with BOM), the download filename, error.
//
// When req.HistoryId is set, the detail rows of that grouping run are
// exported; otherwise the current node-group assignment of every subscribed
// user is exported.
func (l *ExportGroupResultLogic) ExportGroupResult(req *types.ExportGroupResultRequest) ([]byte, string, error) {
	var records [][]string
	// CSV header row (user ID, node group ID, node group name).
	records = append(records, []string{"用户ID", "节点组ID", "节点组名称"})
	if req.HistoryId != nil {
		// Export the detailed result of one specific history run.
		var details []group.GroupHistoryDetail
		if err := l.svcCtx.DB.Where("history_id = ?", *req.HistoryId).Find(&details).Error; err != nil {
			logger.Errorf("failed to get group history details: %v", err)
			return nil, "", err
		}
		for _, detail := range details {
			// Parse user info out of the stored UserData JSON.
			type UserInfo struct {
				Id    int64  `json:"id"`
				Email string `json:"email"`
			}
			var users []UserInfo
			// NOTE(review): JSON_ARRAY(?) constructs a new array from its
			// argument rather than parsing detail.UserData — confirm this query
			// actually yields rows; a plain json.Unmarshal of UserData may be
			// what was intended.
			if err := l.svcCtx.DB.Raw("SELECT * FROM JSON_ARRAY(?)", detail.UserData).Scan(&users).Error; err != nil {
				logger.Errorf("failed to parse user data: %v", err)
				continue
			}
			// Resolve the node group name (best effort; zero value on miss).
			var nodeGroup group.NodeGroup
			l.svcCtx.DB.Where("id = ?", detail.NodeGroupId).First(&nodeGroup)
			// One CSV row per user in the group. (Loop variable renamed from
			// "user" to avoid shadowing the imported user package.)
			for _, u := range users {
				records = append(records, []string{
					fmt.Sprintf("%d", u.Id),
					fmt.Sprintf("%d", nodeGroup.Id),
					nodeGroup.Name,
				})
			}
		}
	} else {
		// Export the current grouping of all users with a node group assigned.
		type UserNodeGroupInfo struct {
			Id          int64 `json:"id"`
			NodeGroupId int64 `json:"node_group_id"`
		}
		var userSubscribes []UserNodeGroupInfo
		if err := l.svcCtx.DB.Model(&user.Subscribe{}).
			Select("DISTINCT user_id as id, node_group_id").
			Where("node_group_id > ?", 0).
			Find(&userSubscribes).Error; err != nil {
			logger.Errorf("failed to get users: %v", err)
			return nil, "", err
		}
		for _, us := range userSubscribes {
			// Skip users whose node group no longer exists.
			var nodeGroup group.NodeGroup
			if err := l.svcCtx.DB.Where("id = ?", us.NodeGroupId).First(&nodeGroup).Error; err != nil {
				logger.Errorf("failed to find node group: %v", err)
				continue
			}
			records = append(records, []string{
				fmt.Sprintf("%d", us.Id),
				fmt.Sprintf("%d", nodeGroup.Id),
				nodeGroup.Name,
			})
		}
	}
	// Render the CSV (WriteAll flushes; any write error surfaces via Error()).
	var buf bytes.Buffer
	writer := csv.NewWriter(&buf)
	writer.WriteAll(records)
	writer.Flush()
	if err := writer.Error(); err != nil {
		logger.Errorf("failed to write csv: %v", err)
		return nil, "", err
	}
	// Prepend a UTF-8 BOM so spreadsheet apps detect the encoding.
	bom := []byte{0xEF, 0xBB, 0xBF}
	csvData := buf.Bytes()
	result := make([]byte, 0, len(bom)+len(csvData))
	result = append(result, bom...)
	result = append(result, csvData...)
	// Build the filename. Bug fix: req.HistoryId is *int64, so formatting the
	// pointer itself with %d printed its address (or 0 for nil); dereference
	// it, and fall back to a generic name for the full-assignment export.
	filename := "group_result_all.csv"
	if req.HistoryId != nil {
		filename = fmt.Sprintf("group_result_%d.csv", *req.HistoryId)
	}
	return result, filename, nil
}

View File

@ -0,0 +1,125 @@
package group
import (
"context"
"encoding/json"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/system"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"github.com/pkg/errors"
"gorm.io/gorm"
)
// GetGroupConfigLogic carries the request context and shared service
// dependencies for the "get group config" endpoint.
type GetGroupConfigLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewGetGroupConfigLogic builds a GetGroupConfigLogic with a logger bound to
// the given request context.
func NewGetGroupConfigLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetGroupConfigLogic {
	l := &GetGroupConfigLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// GetGroupConfig returns the grouping feature configuration (enabled flag,
// mode, per-mode JSON config blocks) together with the current recalculation
// state.
func (l *GetGroupConfigLogic) GetGroupConfig(req *types.GetGroupConfigRequest) (resp *types.GetGroupConfigResponse, err error) {
	// Rows for the individual configuration entries.
	var enabledConfig system.System
	var modeConfig system.System
	var averageConfig system.System
	var subscribeConfig system.System
	var trafficConfig system.System
	// Read the scalar entries; a missing row is tolerated (zero value used),
	// any other DB error fails the request.
	if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "enabled").First(&enabledConfig).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		l.Errorw("failed to get group enabled config", logger.Field("error", err.Error()))
		return nil, err
	}
	if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "mode").First(&modeConfig).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		l.Errorw("failed to get group mode config", logger.Field("error", err.Error()))
		return nil, err
	}
	// Read the JSON config blocks; lookup or parse failures are silently
	// skipped (best effort) and the key is simply absent from the map.
	config := make(map[string]interface{})
	if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "average_config").First(&averageConfig).Error; err == nil {
		var averageCfg map[string]interface{}
		if err := json.Unmarshal([]byte(averageConfig.Value), &averageCfg); err == nil {
			config["average_config"] = averageCfg
		}
	}
	if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "subscribe_config").First(&subscribeConfig).Error; err == nil {
		var subscribeCfg map[string]interface{}
		if err := json.Unmarshal([]byte(subscribeConfig.Value), &subscribeCfg); err == nil {
			config["subscribe_config"] = subscribeCfg
		}
	}
	if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "traffic_config").First(&trafficConfig).Error; err == nil {
		var trafficCfg map[string]interface{}
		if err := json.Unmarshal([]byte(trafficConfig.Value), &trafficCfg); err == nil {
			config["traffic_config"] = trafficCfg
		}
	}
	// Decode the scalar values; only the literal "true" enables the feature.
	enabled := enabledConfig.Value == "true"
	mode := modeConfig.Value
	if mode == "" {
		mode = "average" // default mode
	}
	// Attach the recalculation state; a failure degrades to an idle state
	// rather than failing the whole config request.
	state, err := l.getRecalculationState()
	if err != nil {
		l.Errorw("failed to get recalculation state", logger.Field("error", err.Error()))
		state = &types.RecalculationState{
			State:    "idle",
			Progress: 0,
			Total:    0,
		}
	}
	resp = &types.GetGroupConfigResponse{
		Enabled: enabled,
		Mode:    mode,
		Config:  config,
		State:   *state,
	}
	return resp, nil
}
// getRecalculationState derives the current recalculation state from the most
// recent GroupHistory row. With no history at all it reports an idle state
// instead of an error.
func (l *GetGroupConfigLogic) getRecalculationState() (*types.RecalculationState, error) {
	var history group.GroupHistory
	err := l.svcCtx.DB.Order("id desc").First(&history).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return &types.RecalculationState{
				State:    "idle",
				Progress: 0,
				Total:    0,
			}, nil
		}
		return nil, err
	}
	// Progress counts processed users (success + failed), matching
	// GetRecalculationStatusLogic. Previously Progress was set to TotalUsers,
	// which made every run look 100% complete regardless of actual progress.
	state := &types.RecalculationState{
		State:    history.State,
		Progress: history.SuccessCount + history.FailedCount,
		Total:    history.TotalUsers,
	}
	return state, nil
}

View File

@ -0,0 +1,109 @@
package group
import (
"context"
"encoding/json"
"errors"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"gorm.io/gorm"
)
// GetGroupHistoryDetailLogic carries the request context and shared service
// dependencies for the "get grouping history detail" endpoint.
type GetGroupHistoryDetailLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewGetGroupHistoryDetailLogic builds a GetGroupHistoryDetailLogic with a
// logger bound to the given request context.
func NewGetGroupHistoryDetailLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetGroupHistoryDetailLogic {
	l := &GetGroupHistoryDetailLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// GetGroupHistoryDetail returns one grouping-run history record together with
// its per-group detail rows and a snapshot of the configuration used.
func (l *GetGroupHistoryDetailLogic) GetGroupHistoryDetail(req *types.GetGroupHistoryDetailRequest) (resp *types.GetGroupHistoryDetailResponse, err error) {
	// Load the history record; not-found becomes a friendly error.
	var history group.GroupHistory
	if err := l.svcCtx.DB.Where("id = ?", req.Id).First(&history).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("group history not found")
		}
		logger.Errorf("failed to find group history: %v", err)
		return nil, err
	}
	// Load the per-group detail rows of this run.
	var details []group.GroupHistoryDetail
	if err := l.svcCtx.DB.Where("history_id = ?", req.Id).Find(&details).Error; err != nil {
		logger.Errorf("failed to find group history details: %v", err)
		return nil, err
	}
	// Convert the optional timestamps to Unix seconds.
	var startTime, endTime *int64
	if history.StartTime != nil {
		t := history.StartTime.Unix()
		startTime = &t
	}
	if history.EndTime != nil {
		t := history.EndTime.Unix()
		endTime = &t
	}
	// Assemble the API-shaped detail record.
	historyDetail := types.GroupHistoryDetail{
		GroupHistory: types.GroupHistory{
			Id:           history.Id,
			GroupMode:    history.GroupMode,
			TriggerType:  history.TriggerType,
			TotalUsers:   history.TotalUsers,
			SuccessCount: history.SuccessCount,
			FailedCount:  history.FailedCount,
			StartTime:    startTime,
			EndTime:      endTime,
			ErrorLog:     history.ErrorMessage,
			CreatedAt:    history.CreatedAt.Unix(),
		},
	}
	// Attach a configuration snapshot when detail rows exist.
	if len(details) > 0 {
		configSnapshot := make(map[string]interface{})
		configSnapshot["group_details"] = details
		// Fetch the mode's config value.
		// NOTE(review): GetGroupConfig reads these values with
		// category='group', key='average_config', while this lookup uses
		// key='group.average_config' on table system_config — confirm the two
		// storage schemes actually agree.
		var configValue string
		if history.GroupMode == "average" {
			l.svcCtx.DB.Table("system_config").
				Where("`key` = ?", "group.average_config").
				Select("value").
				Scan(&configValue)
		} else if history.GroupMode == "traffic" {
			l.svcCtx.DB.Table("system_config").
				Where("`key` = ?", "group.traffic_config").
				Select("value").
				Scan(&configValue)
		}
		// Parse the JSON config (best effort; skipped on parse failure).
		if configValue != "" {
			var config map[string]interface{}
			if err := json.Unmarshal([]byte(configValue), &config); err == nil {
				configSnapshot["config"] = config
			}
		}
		historyDetail.ConfigSnapshot = configSnapshot
	}
	resp = &types.GetGroupHistoryDetailResponse{
		GroupHistoryDetail: historyDetail,
	}
	return resp, nil
}

View File

@ -0,0 +1,87 @@
package group
import (
"context"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
)
// GetGroupHistoryLogic carries the request context and shared service
// dependencies for the "list grouping history" endpoint.
type GetGroupHistoryLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewGetGroupHistoryLogic builds a GetGroupHistoryLogic with a logger bound
// to the given request context.
func NewGetGroupHistoryLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetGroupHistoryLogic {
	l := &GetGroupHistoryLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// GetGroupHistory returns a paginated list of grouping-run history records,
// optionally filtered by group mode and trigger type, newest first.
func (l *GetGroupHistoryLogic) GetGroupHistory(req *types.GetGroupHistoryRequest) (resp *types.GetGroupHistoryResponse, err error) {
	var histories []group.GroupHistory
	var total int64
	// Base query with optional filters applied to both count and page.
	query := l.svcCtx.DB.Model(&group.GroupHistory{})
	if req.GroupMode != "" {
		query = query.Where("group_mode = ?", req.GroupMode)
	}
	if req.TriggerType != "" {
		query = query.Where("trigger_type = ?", req.TriggerType)
	}
	// Total row count before pagination.
	if err := query.Count(&total).Error; err != nil {
		logger.Errorf("failed to count group histories: %v", err)
		return nil, err
	}
	// One page of results, newest first.
	// NOTE(review): assumes req.Page >= 1; page 0 yields a negative offset.
	offset := (req.Page - 1) * req.Size
	if err := query.Order("id DESC").Offset(offset).Limit(req.Size).Find(&histories).Error; err != nil {
		logger.Errorf("failed to find group histories: %v", err)
		return nil, err
	}
	// Map DB rows to the API shape; timestamps become Unix seconds.
	var list []types.GroupHistory
	for _, h := range histories {
		var startTime, endTime *int64
		if h.StartTime != nil {
			t := h.StartTime.Unix()
			startTime = &t
		}
		if h.EndTime != nil {
			t := h.EndTime.Unix()
			endTime = &t
		}
		list = append(list, types.GroupHistory{
			Id:           h.Id,
			GroupMode:    h.GroupMode,
			TriggerType:  h.TriggerType,
			TotalUsers:   h.TotalUsers,
			SuccessCount: h.SuccessCount,
			FailedCount:  h.FailedCount,
			StartTime:    startTime,
			EndTime:      endTime,
			ErrorLog:     h.ErrorMessage,
			CreatedAt:    h.CreatedAt.Unix(),
		})
	}
	resp = &types.GetGroupHistoryResponse{
		Total: total,
		List:  list,
	}
	return resp, nil
}

View File

@ -0,0 +1,103 @@
package group
import (
"context"
"fmt"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/node"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
)
// GetNodeGroupListLogic carries the request context and shared service
// dependencies for the "list node groups" endpoint.
type GetNodeGroupListLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewGetNodeGroupListLogic builds a GetNodeGroupListLogic with a logger bound
// to the given request context.
func NewGetNodeGroupListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetNodeGroupListLogic {
	l := &GetNodeGroupListLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// GetNodeGroupList returns a paginated list of node groups ordered by sort
// value, each annotated with the number of nodes referencing it.
func (l *GetNodeGroupListLogic) GetNodeGroupList(req *types.GetNodeGroupListRequest) (resp *types.GetNodeGroupListResponse, err error) {
	var nodeGroups []group.NodeGroup
	var total int64
	query := l.svcCtx.DB.Model(&group.NodeGroup{})
	// Total row count before pagination.
	if err := query.Count(&total).Error; err != nil {
		logger.Errorf("failed to count node groups: %v", err)
		return nil, err
	}
	// One page of groups, lowest sort first.
	// NOTE(review): assumes req.Page >= 1; page 0 yields a negative offset.
	offset := (req.Page - 1) * req.Size
	if err := query.Order("sort ASC").Offset(offset).Limit(req.Size).Find(&nodeGroups).Error; err != nil {
		logger.Errorf("failed to find node groups: %v", err)
		return nil, err
	}
	// Map DB rows to the API shape.
	var list []types.NodeGroup
	for _, ng := range nodeGroups {
		// Count nodes whose JSON node_group_ids array contains this group.
		// NOTE(review): one COUNT query per group (N+1) and the error is
		// ignored — the count silently stays 0 on failure.
		var nodeCount int64
		l.svcCtx.DB.Model(&node.Node{}).Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", ng.Id)).Count(&nodeCount)
		// Unwrap the optional (pointer) fields, applying defaults.
		var forCalculation bool
		if ng.ForCalculation != nil {
			forCalculation = *ng.ForCalculation
		} else {
			forCalculation = true // unset means "participates in calculation"
		}
		var isExpiredGroup bool
		if ng.IsExpiredGroup != nil {
			isExpiredGroup = *ng.IsExpiredGroup
		}
		var minTrafficGB, maxTrafficGB, maxTrafficGBExpired int64
		if ng.MinTrafficGB != nil {
			minTrafficGB = *ng.MinTrafficGB
		}
		if ng.MaxTrafficGB != nil {
			maxTrafficGB = *ng.MaxTrafficGB
		}
		if ng.MaxTrafficGBExpired != nil {
			maxTrafficGBExpired = *ng.MaxTrafficGBExpired
		}
		list = append(list, types.NodeGroup{
			Id:                  ng.Id,
			Name:                ng.Name,
			Description:         ng.Description,
			Sort:                ng.Sort,
			ForCalculation:      forCalculation,
			IsExpiredGroup:      isExpiredGroup,
			ExpiredDaysLimit:    ng.ExpiredDaysLimit,
			MaxTrafficGBExpired: maxTrafficGBExpired,
			SpeedLimit:          ng.SpeedLimit,
			MinTrafficGB:        minTrafficGB,
			MaxTrafficGB:        maxTrafficGB,
			NodeCount:           nodeCount,
			CreatedAt:           ng.CreatedAt.Unix(),
			UpdatedAt:           ng.UpdatedAt.Unix(),
		})
	}
	resp = &types.GetNodeGroupListResponse{
		Total: total,
		List:  list,
	}
	return resp, nil
}

View File

@ -0,0 +1,57 @@
package group
import (
"context"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"github.com/pkg/errors"
"gorm.io/gorm"
)
// GetRecalculationStatusLogic carries the request context and shared service
// dependencies for the "get recalculation status" endpoint.
type GetRecalculationStatusLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewGetRecalculationStatusLogic builds a GetRecalculationStatusLogic with a
// logger bound to the given request context.
func NewGetRecalculationStatusLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetRecalculationStatusLogic {
	l := &GetRecalculationStatusLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// GetRecalculationStatus reports the state of the most recent grouping run.
// With no history at all it returns an idle state rather than an error.
func (l *GetRecalculationStatusLogic) GetRecalculationStatus() (resp *types.RecalculationState, err error) {
	// The latest GroupHistory row drives the status.
	var history group.GroupHistory
	err = l.svcCtx.DB.Order("id desc").First(&history).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			// No runs yet: report idle.
			resp = &types.RecalculationState{
				State:    "idle",
				Progress: 0,
				Total:    0,
			}
			return resp, nil
		}
		l.Errorw("failed to get group history", logger.Field("error", err.Error()))
		return nil, err
	}
	// Map to RecalculationState:
	// Progress = processed users (success + failed), Total = all users.
	processedUsers := history.SuccessCount + history.FailedCount
	resp = &types.RecalculationState{
		State:    history.State,
		Progress: processedUsers,
		Total:    history.TotalUsers,
	}
	return resp, nil
}

View File

@ -0,0 +1,71 @@
package group
import (
"context"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/subscribe"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
)
// GetSubscribeGroupMappingLogic carries the request context and shared
// service dependencies for the "subscription-to-group mapping" endpoint.
type GetSubscribeGroupMappingLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewGetSubscribeGroupMappingLogic builds a GetSubscribeGroupMappingLogic
// with a logger bound to the given request context.
func NewGetSubscribeGroupMappingLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetSubscribeGroupMappingLogic {
	l := &GetSubscribeGroupMappingLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// GetSubscribeGroupMapping lists every subscription plan together with the
// name of its default node group (the first entry of the plan's
// node_group_ids array).
func (l *GetSubscribeGroupMappingLogic) GetSubscribeGroupMapping(req *types.GetSubscribeGroupMappingRequest) (resp *types.GetSubscribeGroupMappingResponse, err error) {
	// 1. Load every subscription plan.
	var subscribes []subscribe.Subscribe
	if err := l.svcCtx.DB.Model(&subscribe.Subscribe{}).Find(&subscribes).Error; err != nil {
		l.Errorw("[GetSubscribeGroupMapping] failed to query subscribes", logger.Field("error", err.Error()))
		return nil, err
	}
	// 2. Load every node group.
	var nodeGroups []group.NodeGroup
	if err := l.svcCtx.DB.Model(&group.NodeGroup{}).Find(&nodeGroups).Error; err != nil {
		l.Errorw("[GetSubscribeGroupMapping] failed to query node groups", logger.Field("error", err.Error()))
		return nil, err
	}
	// node_group_id -> node_group_name lookup.
	nodeGroupMap := make(map[int64]string)
	for _, ng := range nodeGroups {
		nodeGroupMap[ng.Id] = ng.Name
	}
	// 3. Map each plan to its default node group (one-to-one). The name is
	// left empty when the plan has no groups or the group no longer exists.
	var mappingList []types.SubscribeGroupMappingItem
	for _, sub := range subscribes {
		nodeGroupName := ""
		if len(sub.NodeGroupIds) > 0 {
			defaultNodeGroupId := sub.NodeGroupIds[0]
			nodeGroupName = nodeGroupMap[defaultNodeGroupId]
		}
		mappingList = append(mappingList, types.SubscribeGroupMappingItem{
			SubscribeName: sub.Name,
			NodeGroupName: nodeGroupName,
		})
	}
	resp = &types.GetSubscribeGroupMappingResponse{
		List: mappingList,
	}
	return resp, nil
}

View File

@ -0,0 +1,577 @@
package group
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/node"
"github.com/perfect-panel/server/internal/model/subscribe"
"github.com/perfect-panel/server/internal/model/user"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"github.com/perfect-panel/server/pkg/tool"
)
// PreviewUserNodesLogic carries the request context and shared service
// dependencies for the "preview user nodes" endpoint.
type PreviewUserNodesLogic struct {
	logger.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewPreviewUserNodesLogic builds a PreviewUserNodesLogic with a logger bound
// to the given request context.
func NewPreviewUserNodesLogic(ctx context.Context, svcCtx *svc.ServiceContext) *PreviewUserNodesLogic {
	l := &PreviewUserNodesLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logger.WithContext(ctx)
	return l
}
// PreviewUserNodes previews which nodes a user can currently access, grouped
// for display. When the grouping feature is enabled the nodes are grouped by
// node group; otherwise they are grouped by node tag. Nodes assigned directly
// on a subscription plan are appended as a special group with Id -1 (empty
// name; the frontend localizes it).
func (l *PreviewUserNodesLogic) PreviewUserNodes(req *types.PreviewUserNodesRequest) (resp *types.PreviewUserNodesResponse, err error) {
	logger.Infof("[PreviewUserNodes] userId: %v", req.UserId)
	// 1. Fetch the user's usable subscriptions (status 0-Pending, 1-Active).
	type UserSubscribe struct {
		Id          int64
		UserId      int64
		SubscribeId int64
		NodeGroupId int64 // node_group_id set on the user subscription (single ID)
	}
	var userSubscribes []UserSubscribe
	err = l.svcCtx.DB.Model(&user.Subscribe{}).
		Select("id, user_id, subscribe_id, node_group_id").
		Where("user_id = ? AND status IN ?", req.UserId, []int8{0, 1}).
		Find(&userSubscribes).Error
	if err != nil {
		logger.Errorf("[PreviewUserNodes] failed to get user subscribes: %v", err)
		return nil, err
	}
	if len(userSubscribes) == 0 {
		logger.Infof("[PreviewUserNodes] no user subscribes found")
		resp = &types.PreviewUserNodesResponse{
			UserId:     req.UserId,
			NodeGroups: []types.NodeGroupItem{},
		}
		return resp, nil
	}
	logger.Infof("[PreviewUserNodes] found %v user subscribes", len(userSubscribes))
	// 2. Resolve each subscription's node_group_id by priority:
	//    user_subscribe.node_group_id > subscribe.node_group_id > subscribe.node_group_ids[0]
	// Collect the subscription IDs for a single batch query.
	subscribeIds := make([]int64, len(userSubscribes))
	for i, us := range userSubscribes {
		subscribeIds[i] = us.SubscribeId
	}
	// Batch-load the referenced subscription plans.
	type SubscribeInfo struct {
		Id           int64
		NodeGroupId  int64
		NodeGroupIds string // JSON array string
		Nodes        string // directly assigned node IDs (comma-separated, per parsing below)
		NodeTags     string // node tags (comma-separated)
	}
	var subscribeInfos []SubscribeInfo
	err = l.svcCtx.DB.Model(&subscribe.Subscribe{}).
		Select("id, node_group_id, node_group_ids, nodes, node_tags").
		Where("id IN ?", subscribeIds).
		Find(&subscribeInfos).Error
	if err != nil {
		logger.Errorf("[PreviewUserNodes] failed to get subscribe infos: %v", err)
		return nil, err
	}
	// subscribe_id -> SubscribeInfo lookup.
	subInfoMap := make(map[int64]SubscribeInfo)
	for _, si := range subscribeInfos {
		subInfoMap[si.Id] = si
	}
	// Pick the node_group_id for each user subscription by the priority above.
	var allNodeGroupIds []int64
	for _, us := range userSubscribes {
		nodeGroupId := int64(0)
		// Priority 1: user_subscribe.node_group_id
		if us.NodeGroupId != 0 {
			nodeGroupId = us.NodeGroupId
			logger.Debugf("[PreviewUserNodes] user_subscribe_id=%d using node_group_id=%d", us.Id, nodeGroupId)
		} else {
			// Priority 2: subscribe.node_group_id
			subInfo, ok := subInfoMap[us.SubscribeId]
			if ok {
				if subInfo.NodeGroupId != 0 {
					nodeGroupId = subInfo.NodeGroupId
					logger.Debugf("[PreviewUserNodes] user_subscribe_id=%d using subscribe.node_group_id=%d", us.Id, nodeGroupId)
				} else if subInfo.NodeGroupIds != "" && subInfo.NodeGroupIds != "null" && subInfo.NodeGroupIds != "[]" {
					// Priority 3: subscribe.node_group_ids[0]
					var nodeGroupIds []int64
					if err := json.Unmarshal([]byte(subInfo.NodeGroupIds), &nodeGroupIds); err == nil && len(nodeGroupIds) > 0 {
						nodeGroupId = nodeGroupIds[0]
						logger.Debugf("[PreviewUserNodes] user_subscribe_id=%d using subscribe.node_group_ids[0]=%d", us.Id, nodeGroupId)
					}
				}
			}
		}
		if nodeGroupId != 0 {
			allNodeGroupIds = append(allNodeGroupIds, nodeGroupId)
		}
	}
	// Deduplicate the collected group IDs.
	allNodeGroupIds = removeDuplicateInt64(allNodeGroupIds)
	logger.Infof("[PreviewUserNodes] collected node_group_ids with priority: %v", allNodeGroupIds)
	// 3. Collect node IDs assigned directly on any of the plans.
	var allDirectNodeIds []int64
	for _, subInfo := range subscribeInfos {
		if subInfo.Nodes != "" && subInfo.Nodes != "null" {
			// nodes is a comma-separated string such as "1,2,3".
			nodeIdStrs := strings.Split(subInfo.Nodes, ",")
			for _, idStr := range nodeIdStrs {
				idStr = strings.TrimSpace(idStr)
				if idStr != "" {
					var nodeId int64
					if _, err := fmt.Sscanf(idStr, "%d", &nodeId); err == nil {
						allDirectNodeIds = append(allDirectNodeIds, nodeId)
					}
				}
			}
			logger.Debugf("[PreviewUserNodes] subscribe_id=%d has direct nodes: %s", subInfo.Id, subInfo.Nodes)
		}
	}
	// Deduplicate the direct node IDs.
	allDirectNodeIds = removeDuplicateInt64(allDirectNodeIds)
	logger.Infof("[PreviewUserNodes] collected direct node_ids: %v", allDirectNodeIds)
	// 4. Check whether the grouping feature is enabled (query errors are
	// ignored; an empty value reads as disabled).
	type SystemConfig struct {
		Value string
	}
	var config SystemConfig
	l.svcCtx.DB.Model(&struct {
		Category string `gorm:"column:category"`
		Key      string `gorm:"column:key"`
		Value    string `gorm:"column:value"`
	}{}).
		Table("system").
		Where("`category` = ? AND `key` = ?", "group", "enabled").
		Select("value").
		Scan(&config)
	logger.Infof("[PreviewUserNodes] groupEnabled: %v", config.Value)
	// NOTE(review): GetGroupConfig treats only "true" as enabled, while here
	// "1" is accepted too — confirm which representation is canonical.
	isGroupEnabled := config.Value == "true" || config.Value == "1"
	var filteredNodes []node.Node
	if isGroupEnabled {
		// === Grouping enabled: select nodes via the user's node groups. ===
		logger.Infof("[PreviewUserNodes] using group-based node filtering")
		if len(allNodeGroupIds) == 0 && len(allDirectNodeIds) == 0 {
			logger.Infof("[PreviewUserNodes] no node groups and no direct nodes found in user subscribes")
			resp = &types.PreviewUserNodesResponse{
				UserId:     req.UserId,
				NodeGroups: []types.NodeGroupItem{},
			}
			return resp, nil
		}
		// 5. Load all enabled nodes (only when there are groups to match).
		if len(allNodeGroupIds) > 0 {
			var dbNodes []node.Node
			err = l.svcCtx.DB.Model(&node.Node{}).
				Where("enabled = ?", true).
				Find(&dbNodes).Error
			if err != nil {
				logger.Errorf("[PreviewUserNodes] failed to get nodes: %v", err)
				return nil, err
			}
			// 6. Keep nodes whose node_group_ids intersect the user's groups.
			// Public nodes (no group configured) are not shown in the preview.
			for _, n := range dbNodes {
				if len(n.NodeGroupIds) == 0 {
					continue
				}
				// Any intersection with the user's node groups qualifies.
				for _, nodeGroupId := range n.NodeGroupIds {
					if tool.Contains(allNodeGroupIds, nodeGroupId) {
						filteredNodes = append(filteredNodes, n)
						break
					}
				}
			}
			logger.Infof("[PreviewUserNodes] found %v nodes using group filter", len(filteredNodes))
		}
	} else {
		// === Grouping disabled: select nodes via the plans' node_tags. ===
		logger.Infof("[PreviewUserNodes] using tag-based node filtering")
		// Merge node_tags from the already-loaded subscribeInfos.
		var allTags []string
		for _, subInfo := range subscribeInfos {
			if subInfo.NodeTags != "" {
				tags := strings.Split(subInfo.NodeTags, ",")
				allTags = append(allTags, tags...)
			}
		}
		// Deduplicate, then drop empty strings.
		allTags = tool.RemoveDuplicateElements(allTags...)
		allTags = tool.RemoveStringElement(allTags, "")
		logger.Infof("[PreviewUserNodes] merged tags from subscribes: %v", allTags)
		if len(allTags) == 0 && len(allDirectNodeIds) == 0 {
			logger.Infof("[PreviewUserNodes] no tags and no direct nodes found in subscribes")
			resp = &types.PreviewUserNodesResponse{
				UserId:     req.UserId,
				NodeGroups: []types.NodeGroupItem{},
			}
			return resp, nil
		}
		// 8. Load all enabled nodes (only when there are tags to match).
		if len(allTags) > 0 {
			var dbNodes []node.Node
			err = l.svcCtx.DB.Model(&node.Node{}).
				Where("enabled = ?", true).
				Find(&dbNodes).Error
			if err != nil {
				logger.Errorf("[PreviewUserNodes] failed to get nodes: %v", err)
				return nil, err
			}
			// 9. Keep nodes sharing at least one tag with the plans.
			for _, n := range dbNodes {
				if n.Tags == "" {
					continue
				}
				nodeTags := strings.Split(n.Tags, ",")
				// Any tag intersection qualifies.
				for _, tag := range nodeTags {
					if tag != "" && tool.Contains(allTags, tag) {
						filteredNodes = append(filteredNodes, n)
						break
					}
				}
			}
			logger.Infof("[PreviewUserNodes] found %v nodes using tag filter", len(filteredNodes))
		}
	}
	// 10. Group the filtered nodes for display, depending on the feature flag.
	nodeGroupItems := make([]types.NodeGroupItem, 0)
	if isGroupEnabled {
		// === Grouping enabled: bucket nodes by node group. ===
		type NodeWithGroup struct {
			Node         node.Node
			NodeGroupIds []int64
		}
		nodesWithGroup := make([]NodeWithGroup, 0, len(filteredNodes))
		for _, n := range filteredNodes {
			nodesWithGroup = append(nodesWithGroup, NodeWithGroup{
				Node:         n,
				NodeGroupIds: n.NodeGroupIds,
			})
		}
		// Per-group bucket of converted nodes.
		type NodeGroupMap struct {
			Id    int64
			Nodes []types.Node
		}
		// group_id -> bucket.
		groupMap := make(map[int64]*NodeGroupMap)
		// Every group ID encountered, in first-seen order.
		allGroupIds := make([]int64, 0)
		for _, ng := range nodesWithGroup {
			if len(ng.NodeGroupIds) > 0 {
				// A grouped node is bucketed under its FIRST node group only.
				firstGroupId := ng.NodeGroupIds[0]
				if _, exists := groupMap[firstGroupId]; !exists {
					groupMap[firstGroupId] = &NodeGroupMap{
						Id:    firstGroupId,
						Nodes: []types.Node{},
					}
					allGroupIds = append(allGroupIds, firstGroupId)
				}
				// Convert to the API node shape.
				tags := []string{}
				if ng.Node.Tags != "" {
					tags = strings.Split(ng.Node.Tags, ",")
				}
				// NOTE(review): this local shadows the imported node package
				// within this scope.
				node := types.Node{
					Id:           ng.Node.Id,
					Name:         ng.Node.Name,
					Tags:         tags,
					Port:         ng.Node.Port,
					Address:      ng.Node.Address,
					ServerId:     ng.Node.ServerId,
					Protocol:     ng.Node.Protocol,
					Enabled:      ng.Node.Enabled,
					Sort:         ng.Node.Sort,
					NodeGroupIds: []int64(ng.Node.NodeGroupIds),
					CreatedAt:    ng.Node.CreatedAt.Unix(),
					UpdatedAt:    ng.Node.UpdatedAt.Unix(),
				}
				groupMap[firstGroupId].Nodes = append(groupMap[firstGroupId].Nodes, node)
			} else {
				// Ungrouped nodes go into a synthetic bucket with group_id 0.
				if _, exists := groupMap[0]; !exists {
					groupMap[0] = &NodeGroupMap{
						Id:    0,
						Nodes: []types.Node{},
					}
				}
				tags := []string{}
				if ng.Node.Tags != "" {
					tags = strings.Split(ng.Node.Tags, ",")
				}
				node := types.Node{
					Id:           ng.Node.Id,
					Name:         ng.Node.Name,
					Tags:         tags,
					Port:         ng.Node.Port,
					Address:      ng.Node.Address,
					ServerId:     ng.Node.ServerId,
					Protocol:     ng.Node.Protocol,
					Enabled:      ng.Node.Enabled,
					Sort:         ng.Node.Sort,
					NodeGroupIds: []int64(ng.Node.NodeGroupIds),
					CreatedAt:    ng.Node.CreatedAt.Unix(),
					UpdatedAt:    ng.Node.UpdatedAt.Unix(),
				}
				groupMap[0].Nodes = append(groupMap[0].Nodes, node)
			}
		}
		// Resolve node-group names and build the response buckets.
		nodeGroupInfoMap := make(map[int64]string)
		validGroupIds := make([]int64, 0)
		if len(allGroupIds) > 0 {
			type NodeGroupInfo struct {
				Id   int64
				Name string
			}
			var nodeGroupInfos []NodeGroupInfo
			err = l.svcCtx.DB.Model(&group.NodeGroup{}).
				Select("id, name").
				Where("id IN ?", allGroupIds).
				Find(&nodeGroupInfos).Error
			if err != nil {
				logger.Errorf("[PreviewUserNodes] failed to get node group infos: %v", err)
				return nil, err
			}
			logger.Infof("[PreviewUserNodes] found %v node group infos from %v requested", len(nodeGroupInfos), len(allGroupIds))
			// Build the id -> name map and the list of group IDs that exist.
			for _, ngInfo := range nodeGroupInfos {
				nodeGroupInfoMap[ngInfo.Id] = ngInfo.Name
				validGroupIds = append(validGroupIds, ngInfo.Id)
				logger.Debugf("[PreviewUserNodes] node_group[%d] = %s", ngInfo.Id, ngInfo.Name)
			}
			// Log group IDs that no longer exist in the database.
			for _, requestedId := range allGroupIds {
				found := false
				for _, validId := range validGroupIds {
					if requestedId == validId {
						found = true
						break
					}
				}
				if !found {
					logger.Infof("[PreviewUserNodes] node_group_id %d not found in database, treating as public nodes", requestedId)
				}
			}
		}
		// Re-classify the buckets: valid groups become response items,
		// everything else is collected as "public" nodes.
		publicNodes := make([]types.Node, 0)
		for groupId, gm := range groupMap {
			if groupId == 0 {
				// Nodes that never had a group.
				publicNodes = append(publicNodes, gm.Nodes...)
				continue
			}
			// Does this group ID still exist?
			isValid := false
			for _, validId := range validGroupIds {
				if groupId == validId {
					isValid = true
					break
				}
			}
			if isValid {
				// Valid group: emit it as its own response item.
				groupName := nodeGroupInfoMap[groupId]
				if groupName == "" {
					groupName = fmt.Sprintf("Group %d", groupId)
				}
				nodeGroupItems = append(nodeGroupItems, types.NodeGroupItem{
					Id:    groupId,
					Name:  groupName,
					Nodes: gm.Nodes,
				})
				logger.Infof("[PreviewUserNodes] adding node group: id=%d, name=%s, nodes=%d", groupId, groupName, len(gm.Nodes))
			} else {
				// Stale group: demote its nodes to the public bucket.
				logger.Infof("[PreviewUserNodes] node_group_id %d invalid, moving %d nodes to public group", groupId, len(gm.Nodes))
				publicNodes = append(publicNodes, gm.Nodes...)
			}
		}
		// The preview intentionally hides public nodes (empty node_group_ids):
		// only nodes in the user's actual groups are displayed.
		if len(publicNodes) > 0 {
			logger.Infof("[PreviewUserNodes] skipping %d public nodes (not in user's assigned group)", len(publicNodes))
		}
	} else {
		// === Grouping disabled: bucket nodes by tag. ===
		tagGroupMap := make(map[string][]types.Node)
		for _, n := range filteredNodes {
			tags := []string{}
			if n.Tags != "" {
				tags = strings.Split(n.Tags, ",")
			}
			// Convert to the API node shape.
			node := types.Node{
				Id:           n.Id,
				Name:         n.Name,
				Tags:         tags,
				Port:         n.Port,
				Address:      n.Address,
				ServerId:     n.ServerId,
				Protocol:     n.Protocol,
				Enabled:      n.Enabled,
				Sort:         n.Sort,
				NodeGroupIds: []int64(n.NodeGroupIds),
				CreatedAt:    n.CreatedAt.Unix(),
				UpdatedAt:    n.UpdatedAt.Unix(),
			}
			// A node appears under every tag it carries.
			if len(tags) > 0 {
				for _, tag := range tags {
					tag = strings.TrimSpace(tag)
					if tag != "" {
						tagGroupMap[tag] = append(tagGroupMap[tag], node)
					}
				}
			} else {
				// Untagged nodes fall into the "" bucket.
				tagGroupMap[""] = append(tagGroupMap[""], node)
			}
		}
		// Emit one response item per tag.
		// NOTE(review): map iteration order is randomized, so the tag-group
		// order varies between calls — confirm whether the frontend sorts.
		for tag, nodes := range tagGroupMap {
			nodeGroupItems = append(nodeGroupItems, types.NodeGroupItem{
				Id:    0, // tag groups use ID 0
				Name:  tag,
				Nodes: nodes,
			})
			logger.Infof("[PreviewUserNodes] adding tag group: tag=%s, nodes=%d", tag, len(nodes))
		}
	}
	// Append the plans' directly assigned nodes as their own special group.
	if len(allDirectNodeIds) > 0 {
		// Load the directly assigned, enabled nodes.
		var directNodes []node.Node
		err = l.svcCtx.DB.Model(&node.Node{}).
			Where("id IN ? AND enabled = ?", allDirectNodeIds, true).
			Find(&directNodes).Error
		if err != nil {
			logger.Errorf("[PreviewUserNodes] failed to get direct nodes: %v", err)
			return nil, err
		}
		if len(directNodes) > 0 {
			// Convert to the API node shape.
			directNodeItems := make([]types.Node, 0, len(directNodes))
			for _, n := range directNodes {
				tags := []string{}
				if n.Tags != "" {
					tags = strings.Split(n.Tags, ",")
				}
				directNodeItems = append(directNodeItems, types.Node{
					Id:           n.Id,
					Name:         n.Name,
					Tags:         tags,
					Port:         n.Port,
					Address:      n.Address,
					ServerId:     n.ServerId,
					Protocol:     n.Protocol,
					Enabled:      n.Enabled,
					Sort:         n.Sort,
					NodeGroupIds: []int64(n.NodeGroupIds),
					CreatedAt:    n.CreatedAt.Unix(),
					UpdatedAt:    n.UpdatedAt.Unix(),
				})
			}
			// Special group ID -1 with an empty name; the frontend recognizes
			// ID -1 and localizes the label.
			nodeGroupItems = append(nodeGroupItems, types.NodeGroupItem{
				Id:    -1,
				Name:  "",
				Nodes: directNodeItems,
			})
			logger.Infof("[PreviewUserNodes] adding subscription nodes group: nodes=%d", len(directNodeItems))
		}
	}
	// 14. Assemble and return the result.
	resp = &types.PreviewUserNodesResponse{
		UserId:     req.UserId,
		NodeGroups: nodeGroupItems,
	}
	logger.Infof("[PreviewUserNodes] returning %v node groups for user %v", len(resp.NodeGroups), req.UserId)
	return resp, nil
}
// removeDuplicateInt64 returns a new slice containing the elements of slice
// in their original order with duplicate values removed. A nil or empty
// input yields a nil result.
func removeDuplicateInt64(slice []int64) []int64 {
	seen := make(map[int64]struct{}, len(slice))
	var out []int64
	for _, v := range slice {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}

View File

@ -0,0 +1,818 @@
package group
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/node"
"github.com/perfect-panel/server/internal/model/subscribe"
"github.com/perfect-panel/server/internal/model/user"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"github.com/pkg/errors"
"gorm.io/gorm"
)
// RecalculateGroupLogic carries the dependencies for the admin endpoint that
// recalculates user node-group assignments.
type RecalculateGroupLogic struct {
	logger.Logger // request-scoped structured logger
	ctx           context.Context
	svcCtx        *svc.ServiceContext // shared service dependencies (DB, config, ...)
}
// NewRecalculateGroupLogic constructs a RecalculateGroupLogic bound to the
// given request context and service context.
func NewRecalculateGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *RecalculateGroupLogic {
	l := &RecalculateGroupLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
	}
	l.Logger = logger.WithContext(ctx)
	return l
}
// RecalculateGroup reruns user grouping using the algorithm selected by
// req.Mode ("average", "subscribe" or "traffic") and records the run in a
// group.GroupHistory row.
//
// The whole run executes inside one GORM transaction: the history row is
// created, flipped to "running", and finished as "completed" with the
// affected-user counters. If the transaction fails, the history row is
// updated to "failed" (best effort, outside the rolled-back transaction)
// and the original error is returned.
func (l *RecalculateGroupLogic) RecalculateGroup(req *types.RecalculateGroupRequest) error {
	// Validate the mode parameter.
	if req.Mode != "average" && req.Mode != "subscribe" && req.Mode != "traffic" {
		return errors.New("invalid mode, must be one of: average, subscribe, traffic")
	}
	// Create the GroupHistory record (initial state is pending).
	triggerType := req.TriggerType
	if triggerType == "" {
		triggerType = "manual" // default to a manually triggered run
	}
	history := &group.GroupHistory{
		GroupMode:    req.Mode,
		TriggerType:  triggerType,
		TotalUsers:   0,
		SuccessCount: 0,
		FailedCount:  0,
	}
	now := time.Now()
	history.StartTime = &now
	// Execute the recalculation inside a single transaction.
	err := l.svcCtx.DB.Transaction(func(tx *gorm.DB) error {
		// Persist the history record.
		if err := tx.Create(history).Error; err != nil {
			l.Errorw("failed to create group history", logger.Field("error", err.Error()))
			return err
		}
		// Mark the run as running.
		if err := tx.Model(history).Update("state", "running").Error; err != nil {
			l.Errorw("failed to update history state to running", logger.Field("error", err.Error()))
			return err
		}
		// Dispatch to the grouping algorithm selected by mode.
		var affectedCount int
		var err error
		switch req.Mode {
		case "average":
			affectedCount, err = l.executeAverageGrouping(tx, history.Id)
			if err != nil {
				l.Errorw("failed to execute average grouping", logger.Field("error", err.Error()))
				return err
			}
		case "subscribe":
			affectedCount, err = l.executeSubscribeGrouping(tx, history.Id)
			if err != nil {
				l.Errorw("failed to execute subscribe grouping", logger.Field("error", err.Error()))
				return err
			}
		case "traffic":
			affectedCount, err = l.executeTrafficGrouping(tx, history.Id)
			if err != nil {
				l.Errorw("failed to execute traffic grouping", logger.Field("error", err.Error()))
				return err
			}
		}
		// Finalize the history record (state=completed plus counters).
		// NOTE(review): success_count assumes every affected user succeeded;
		// the per-user failures tracked inside the execute* helpers are not
		// returned, so failed_count is always recorded as 0 — confirm intended.
		endTime := time.Now()
		updates := map[string]interface{}{
			"state":         "completed",
			"total_users":   affectedCount,
			"success_count": affectedCount, // temporarily assume all succeeded
			"failed_count":  0,
			"end_time":      endTime,
		}
		if err := tx.Model(history).Updates(updates).Error; err != nil {
			l.Errorw("failed to update history state to completed", logger.Field("error", err.Error()))
			return err
		}
		l.Infof("group recalculation completed: mode=%s, affected_users=%d", req.Mode, affectedCount)
		return nil
	})
	if err != nil {
		// On failure, mark the history record as failed with the error message.
		updateErr := l.svcCtx.DB.Model(history).Updates(map[string]interface{}{
			"state":         "failed",
			"error_message": err.Error(),
			"end_time":      time.Now(),
		}).Error
		if updateErr != nil {
			l.Errorw("failed to update history state to failed", logger.Field("error", updateErr.Error()))
		}
		return err
	}
	return nil
}
// getUserEmail looks up the email auth identifier registered for userId in
// the user auth-methods table. It returns the empty string when no matching
// auth method row exists (or the query fails).
func (l *RecalculateGroupLogic) getUserEmail(tx *gorm.DB, userId int64) string {
	var row struct {
		AuthIdentifier string `json:"auth_identifier"`
	}
	err := tx.Model(&user.AuthMethods{}).
		Select("auth_identifier").
		Where("user_id = ? AND (auth_type = ? OR auth_type = ?)", userId, "email", "6").
		First(&row).Error
	if err != nil {
		return ""
	}
	return row.AuthIdentifier
}
// executeAverageGrouping implements the "average" grouping algorithm: every
// valid, unlocked user subscription is assigned one of its subscribe plan's
// calculation-enabled node groups in round-robin order, and the chosen id is
// written to user_subscribe.node_group_id (0 when no eligible group exists).
//
// It returns the number of user subscriptions that were (re)assigned and
// records per-node-group statistics as GroupHistoryDetail rows under historyId.
//
// Fixes over the previous version: subscriptions without eligible node groups
// were updated to node_group_id = 0 twice (a redundant extra UPDATE before the
// common-path UPDATE); the write-only nodeGroupUserCount/nodeGroupNodeCount
// maps have been removed.
func (l *RecalculateGroupLogic) executeAverageGrouping(tx *gorm.DB, historyId int64) (int, error) {
	// 1. Load all valid (status IN (0, 1)) and unlocked user subscriptions.
	type UserSubscribeInfo struct {
		Id          int64 `json:"id"`
		UserId      int64 `json:"user_id"`
		SubscribeId int64 `json:"subscribe_id"`
	}
	var userSubscribes []UserSubscribeInfo
	if err := tx.Model(&user.Subscribe{}).
		Select("id, user_id, subscribe_id").
		Where("group_locked = ? AND status IN (0, 1)", 0). // only unlocked, valid user subscriptions
		Scan(&userSubscribes).Error; err != nil {
		return 0, err
	}
	if len(userSubscribes) == 0 {
		l.Infof("average grouping: no valid and unlocked user subscribes found")
		return 0, nil
	}
	l.Infof("average grouping: found %d valid and unlocked user subscribes", len(userSubscribes))
	// 1.5 Load the ids of node groups that participate in calculation.
	var calculationNodeGroups []group.NodeGroup
	if err := tx.Model(&group.NodeGroup{}).
		Select("id").
		Where("for_calculation = ?", true).
		Scan(&calculationNodeGroups).Error; err != nil {
		l.Errorw("failed to query calculation node groups", logger.Field("error", err.Error()))
		return 0, err
	}
	// Set of calculation-enabled node group ids for O(1) membership tests.
	calculationNodeGroupIds := make(map[int64]bool, len(calculationNodeGroups))
	for _, ng := range calculationNodeGroups {
		calculationNodeGroupIds[ng.Id] = true
	}
	l.Infof("average grouping: found %d node groups with for_calculation=true", len(calculationNodeGroupIds))
	// 2. Batch-load the node group ids configured on the referenced subscribe plans.
	subscribeIds := make([]int64, len(userSubscribes))
	for i, us := range userSubscribes {
		subscribeIds[i] = us.SubscribeId
	}
	type SubscribeInfo struct {
		Id           int64  `json:"id"`
		NodeGroupIds string `json:"node_group_ids"` // JSON-encoded []int64
	}
	var subscribeInfos []SubscribeInfo
	if err := tx.Model(&subscribe.Subscribe{}).
		Select("id, node_group_ids").
		Where("id IN ?", subscribeIds).
		Find(&subscribeInfos).Error; err != nil {
		l.Errorw("failed to query subscribe infos", logger.Field("error", err.Error()))
		return 0, err
	}
	// subscribe_id -> SubscribeInfo lookup table.
	subInfoMap := make(map[int64]SubscribeInfo, len(subscribeInfos))
	for _, si := range subscribeInfos {
		subInfoMap[si.Id] = si
	}
	// Per-node-group list of assigned users, kept for the history details.
	groupUsersMap := make(map[int64][]struct {
		Id    int64  `json:"id"`
		Email string `json:"email"`
	})
	// 3. Walk every user subscription and assign node groups round-robin.
	affectedCount := 0
	failedCount := 0
	// Per-subscribe rotation index driving the sequential (round-robin) assignment.
	subscribeAllocationIndex := make(map[int64]int) // subscribe_id -> current_index
	for _, us := range userSubscribes {
		subInfo, ok := subInfoMap[us.SubscribeId]
		if !ok {
			l.Infow("subscribe not found",
				logger.Field("user_subscribe_id", us.Id),
				logger.Field("subscribe_id", us.SubscribeId))
			failedCount++
			continue
		}
		// Parse the plan's node group id list, keeping only calculation-enabled groups.
		var nodeGroupIds []int64
		if subInfo.NodeGroupIds != "" && subInfo.NodeGroupIds != "[]" {
			var allNodeGroupIds []int64
			if err := json.Unmarshal([]byte(subInfo.NodeGroupIds), &allNodeGroupIds); err != nil {
				l.Errorw("failed to parse node_group_ids",
					logger.Field("subscribe_id", subInfo.Id),
					logger.Field("node_group_ids", subInfo.NodeGroupIds),
					logger.Field("error", err.Error()))
				failedCount++
				continue
			}
			for _, ngId := range allNodeGroupIds {
				if calculationNodeGroupIds[ngId] {
					nodeGroupIds = append(nodeGroupIds, ngId)
				}
			}
			if len(nodeGroupIds) == 0 && len(allNodeGroupIds) > 0 {
				l.Debugw("all node_group_ids are not for calculation, setting to 0",
					logger.Field("subscribe_id", subInfo.Id),
					logger.Field("total_node_groups", len(allNodeGroupIds)))
			}
		}
		// With no eligible groups, selectedNodeGroupId stays 0 and the single
		// UPDATE below clears the assignment. (The previous version issued a
		// redundant extra UPDATE to 0 here before updating again below.)
		if len(nodeGroupIds) == 0 {
			l.Debugf("no valid node_group_ids for subscribe_id=%d, setting to 0", subInfo.Id)
		}
		// Pick the next node group in sequence (round-robin per subscribe plan).
		selectedNodeGroupId := int64(0)
		if len(nodeGroupIds) > 0 {
			currentIndex := subscribeAllocationIndex[us.SubscribeId]
			selectedNodeGroupId = nodeGroupIds[currentIndex]
			// Advance the index, wrapping around (round-robin).
			subscribeAllocationIndex[us.SubscribeId] = (currentIndex + 1) % len(nodeGroupIds)
			l.Debugf("assigning user_subscribe_id=%d (subscribe_id=%d) to node_group_id=%d (index=%d, total_options=%d, mode=sequential)",
				us.Id, us.SubscribeId, selectedNodeGroupId, currentIndex, len(nodeGroupIds))
		}
		// Persist the single selected id on user_subscribe.node_group_id.
		if err := tx.Model(&user.Subscribe{}).
			Where("id = ?", us.Id).
			Update("node_group_id", selectedNodeGroupId).Error; err != nil {
			l.Errorw("failed to update user_subscribe node_group_id",
				logger.Field("user_subscribe_id", us.Id),
				logger.Field("error", err.Error()))
			failedCount++
			continue
		}
		// Only users that actually got a group are recorded in the statistics.
		if selectedNodeGroupId > 0 {
			// Resolve the user's email for the history record.
			email := l.getUserEmail(tx, us.UserId)
			groupUsersMap[selectedNodeGroupId] = append(groupUsersMap[selectedNodeGroupId], struct {
				Id    int64  `json:"id"`
				Email string `json:"email"`
			}{
				Id:    us.UserId,
				Email: email,
			})
		}
		affectedCount++
	}
	l.Infof("average grouping completed: affected=%d, failed=%d", affectedCount, failedCount)
	// 4. Write one GroupHistoryDetail row per populated node group.
	for nodeGroupId, users := range groupUsersMap {
		userCount := len(users)
		if userCount == 0 {
			continue
		}
		// Count the nodes that belong to this node group.
		var nodeCount int64 = 0
		if nodeGroupId > 0 {
			if err := tx.Model(&node.Node{}).
				Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", nodeGroupId)).
				Count(&nodeCount).Error; err != nil {
				l.Errorw("failed to count nodes",
					logger.Field("node_group_id", nodeGroupId),
					logger.Field("error", err.Error()))
			}
		}
		// Serialize the user list as JSON for the detail row (best effort).
		userDataJSON := "[]"
		if jsonData, err := json.Marshal(users); err == nil {
			userDataJSON = string(jsonData)
		} else {
			l.Errorw("failed to marshal user data",
				logger.Field("node_group_id", nodeGroupId),
				logger.Field("error", err.Error()))
		}
		// Create the history detail keyed by node_group_id.
		detail := &group.GroupHistoryDetail{
			HistoryId:   historyId,
			NodeGroupId: nodeGroupId,
			UserCount:   userCount,
			NodeCount:   int(nodeCount),
			UserData:    userDataJSON,
		}
		if err := tx.Create(detail).Error; err != nil {
			l.Errorw("failed to create group history detail",
				logger.Field("node_group_id", nodeGroupId),
				logger.Field("error", err.Error()))
		}
		l.Infof("Average Group (node_group_id=%d): users=%d, nodes=%d",
			nodeGroupId, userCount, nodeCount)
	}
	return affectedCount, nil
}
// executeSubscribeGrouping implements the subscribe-plan based grouping
// algorithm: for every valid, unlocked user subscription it takes the FIRST
// calculation-enabled node group configured on the plan and writes it to
// user_subscribe.node_group_id; unlocked subscriptions whose status is no
// longer valid get node_group_id reset to 0.
//
// It returns the number of valid subscriptions processed and records
// per-node-group statistics as GroupHistoryDetail rows under historyId.
func (l *RecalculateGroupLogic) executeSubscribeGrouping(tx *gorm.DB, historyId int64) (int, error) {
	// 1. Load all valid (status IN (0, 1)) and unlocked (group_locked = 0) user subscriptions.
	type UserSubscribeInfo struct {
		Id          int64 `json:"id"`
		UserId      int64 `json:"user_id"`
		SubscribeId int64 `json:"subscribe_id"`
	}
	var userSubscribes []UserSubscribeInfo
	if err := tx.Model(&user.Subscribe{}).
		Select("id, user_id, subscribe_id").
		Where("group_locked = ? AND status IN (0, 1)", 0).
		Scan(&userSubscribes).Error; err != nil {
		l.Errorw("failed to query user subscribes", logger.Field("error", err.Error()))
		return 0, err
	}
	if len(userSubscribes) == 0 {
		l.Infof("subscribe grouping: no valid and unlocked user subscribes found")
		return 0, nil
	}
	l.Infof("subscribe grouping: found %d valid and unlocked user subscribes", len(userSubscribes))
	// 1.5 Load the ids of node groups that participate in calculation.
	var calculationNodeGroups []group.NodeGroup
	if err := tx.Model(&group.NodeGroup{}).
		Select("id").
		Where("for_calculation = ?", true).
		Scan(&calculationNodeGroups).Error; err != nil {
		l.Errorw("failed to query calculation node groups", logger.Field("error", err.Error()))
		return 0, err
	}
	// Set of calculation-enabled node group ids for O(1) membership tests.
	calculationNodeGroupIds := make(map[int64]bool)
	for _, ng := range calculationNodeGroups {
		calculationNodeGroupIds[ng.Id] = true
	}
	l.Infof("subscribe grouping: found %d node groups with for_calculation=true", len(calculationNodeGroupIds))
	// 2. Batch-load the node group ids configured on the referenced subscribe plans.
	subscribeIds := make([]int64, len(userSubscribes))
	for i, us := range userSubscribes {
		subscribeIds[i] = us.SubscribeId
	}
	type SubscribeInfo struct {
		Id           int64  `json:"id"`
		NodeGroupIds string `json:"node_group_ids"` // JSON-encoded []int64
	}
	var subscribeInfos []SubscribeInfo
	if err := tx.Model(&subscribe.Subscribe{}).
		Select("id, node_group_ids").
		Where("id IN ?", subscribeIds).
		Find(&subscribeInfos).Error; err != nil {
		l.Errorw("failed to query subscribe infos", logger.Field("error", err.Error()))
		return 0, err
	}
	// subscribe_id -> SubscribeInfo lookup table.
	subInfoMap := make(map[int64]SubscribeInfo)
	for _, si := range subscribeInfos {
		subInfoMap[si.Id] = si
	}
	// Per-node-group statistics kept for the history detail rows.
	type UserInfo struct {
		Id    int64  `json:"id"`
		Email string `json:"email"`
	}
	groupUsersMap := make(map[int64][]UserInfo)
	// NOTE(review): the two count maps below are written but never read.
	nodeGroupUserCount := make(map[int64]int) // node_group_id -> user_count
	nodeGroupNodeCount := make(map[int64]int) // node_group_id -> node_count
	// 3. Walk every user subscription and pick the first eligible node group id.
	affectedCount := 0
	failedCount := 0
	for _, us := range userSubscribes {
		subInfo, ok := subInfoMap[us.SubscribeId]
		if !ok {
			l.Infow("subscribe not found",
				logger.Field("user_subscribe_id", us.Id),
				logger.Field("subscribe_id", us.SubscribeId))
			failedCount++
			continue
		}
		// Parse the plan's node group id list, keeping only calculation-enabled groups.
		var nodeGroupIds []int64
		if subInfo.NodeGroupIds != "" && subInfo.NodeGroupIds != "[]" {
			var allNodeGroupIds []int64
			if err := json.Unmarshal([]byte(subInfo.NodeGroupIds), &allNodeGroupIds); err != nil {
				l.Errorw("failed to parse node_group_ids",
					logger.Field("subscribe_id", subInfo.Id),
					logger.Field("node_group_ids", subInfo.NodeGroupIds),
					logger.Field("error", err.Error()))
				failedCount++
				continue
			}
			// Keep only node groups that participate in calculation.
			for _, ngId := range allNodeGroupIds {
				if calculationNodeGroupIds[ngId] {
					nodeGroupIds = append(nodeGroupIds, ngId)
				}
			}
			if len(nodeGroupIds) == 0 && len(allNodeGroupIds) > 0 {
				l.Debugw("all node_group_ids are not for calculation, setting to 0",
					logger.Field("subscribe_id", subInfo.Id),
					logger.Field("total_node_groups", len(allNodeGroupIds)))
			}
		}
		// Take the first calculation-enabled node group id, or 0 when none exist.
		selectedNodeGroupId := int64(0)
		if len(nodeGroupIds) > 0 {
			selectedNodeGroupId = nodeGroupIds[0]
		}
		l.Debugf("assigning user_subscribe_id=%d (subscribe_id=%d) to node_group_id=%d (total_options=%d, selected_first)",
			us.Id, us.SubscribeId, selectedNodeGroupId, len(nodeGroupIds))
		// Persist the selection on user_subscribe.node_group_id.
		if err := tx.Model(&user.Subscribe{}).
			Where("id = ?", us.Id).
			Update("node_group_id", selectedNodeGroupId).Error; err != nil {
			l.Errorw("failed to update user_subscribe node_group_id",
				logger.Field("user_subscribe_id", us.Id),
				logger.Field("error", err.Error()))
			failedCount++
			continue
		}
		// Only users that actually got a group are recorded in the statistics.
		if selectedNodeGroupId > 0 {
			// Resolve the user's email for the history record.
			email := l.getUserEmail(tx, us.UserId)
			groupUsersMap[selectedNodeGroupId] = append(groupUsersMap[selectedNodeGroupId], UserInfo{
				Id:    us.UserId,
				Email: email,
			})
			nodeGroupUserCount[selectedNodeGroupId]++
		}
		affectedCount++
	}
	l.Infof("subscribe grouping completed: affected=%d, failed=%d", affectedCount, failedCount)
	// 4. Reset node_group_id to 0 for expired/invalid, unlocked subscriptions.
	var expiredUserSubscribes []struct {
		Id     int64 `json:"id"`
		UserId int64 `json:"user_id"`
	}
	if err := tx.Raw(`
		SELECT us.id, us.user_id
		FROM user_subscribe as us
		WHERE us.group_locked = 0
		AND us.status NOT IN (0, 1)
	`).Scan(&expiredUserSubscribes).Error; err != nil {
		l.Errorw("failed to query expired user subscribes", logger.Field("error", err.Error()))
		// Best effort: a failing expired-user query does not abort the run.
	} else {
		l.Infof("found %d expired user subscribes for subscribe-based grouping, will set node_group_id to 0", len(expiredUserSubscribes))
		expiredAffectedCount := 0
		for _, eu := range expiredUserSubscribes {
			// Clear the assignment on user_subscribe.node_group_id.
			if err := tx.Model(&user.Subscribe{}).
				Where("id = ?", eu.Id).
				Update("node_group_id", 0).Error; err != nil {
				l.Errorw("failed to update expired user subscribe node_group_id",
					logger.Field("user_subscribe_id", eu.Id),
					logger.Field("error", err.Error()))
				continue
			}
			expiredAffectedCount++
		}
		l.Infof("expired user subscribes grouping completed: affected=%d", expiredAffectedCount)
	}
	// 5. Write one GroupHistoryDetail row per populated node group.
	for nodeGroupId, users := range groupUsersMap {
		userCount := len(users)
		if userCount == 0 {
			continue
		}
		// Count the nodes that belong to this node group.
		var nodeCount int64 = 0
		if nodeGroupId > 0 {
			if err := tx.Model(&node.Node{}).
				Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", nodeGroupId)).
				Count(&nodeCount).Error; err != nil {
				l.Errorw("failed to count nodes",
					logger.Field("node_group_id", nodeGroupId),
					logger.Field("error", err.Error()))
			}
		}
		nodeGroupNodeCount[nodeGroupId] = int(nodeCount)
		// Serialize the user list as JSON for the detail row (best effort).
		userDataJSON := "[]"
		if jsonData, err := json.Marshal(users); err == nil {
			userDataJSON = string(jsonData)
		} else {
			l.Errorw("failed to marshal user data",
				logger.Field("node_group_id", nodeGroupId),
				logger.Field("error", err.Error()))
		}
		// Create the history detail keyed by node_group_id.
		detail := &group.GroupHistoryDetail{
			HistoryId:   historyId,
			NodeGroupId: nodeGroupId,
			UserCount:   userCount,
			NodeCount:   int(nodeCount),
			UserData:    userDataJSON,
		}
		if err := tx.Create(detail).Error; err != nil {
			l.Errorw("failed to create group history detail",
				logger.Field("node_group_id", nodeGroupId),
				logger.Field("error", err.Error()))
		}
		l.Infof("Subscribe Group (node_group_id=%d): users=%d, nodes=%d",
			nodeGroupId, userCount, nodeCount)
	}
	return affectedCount, nil
}
// executeTrafficGrouping implements the traffic-based grouping algorithm:
// each valid, unlocked user subscription is matched against the configured
// traffic ranges of calculation-enabled node groups and assigned the first
// group whose half-open interval [min, max) (in GB of upload+download)
// contains its usage; no match leaves node_group_id at 0.
//
// It returns the number of subscriptions processed and records per-group
// statistics as GroupHistoryDetail rows under historyId.
func (l *RecalculateGroupLogic) executeTrafficGrouping(tx *gorm.DB, historyId int64) (int, error) {
	// Per-node-group user info (id and email) kept for the history details.
	type UserInfo struct {
		Id    int64  `json:"id"`
		Email string `json:"email"`
	}
	groupUsersMap := make(map[int64][]UserInfo) // node_group_id -> []UserInfo
	// 1. Load every calculation-enabled node group that has a traffic range.
	var nodeGroups []group.NodeGroup
	if err := tx.Where("for_calculation = ?", true).
		Where("max_traffic_gb > 0").
		Find(&nodeGroups).Error; err != nil {
		l.Errorw("failed to query node groups", logger.Field("error", err.Error()))
		return 0, err
	}
	if len(nodeGroups) == 0 {
		l.Infow("no node groups with traffic ranges configured")
		return 0, nil
	}
	l.Infow("executeTrafficGrouping loaded node groups",
		logger.Field("node_groups_count", len(nodeGroups)))
	// 2. Load all valid, unlocked user subscriptions together with used traffic.
	type UserSubscribeInfo struct {
		Id          int64
		UserId      int64
		Upload      int64
		Download    int64
		UsedTraffic int64 // used traffic = upload + download (bytes)
	}
	var userSubscribes []UserSubscribeInfo
	if err := tx.Model(&user.Subscribe{}).
		Select("id, user_id, upload, download, (upload + download) as used_traffic").
		Where("group_locked = ? AND status IN (0, 1)", 0). // only valid, unlocked user subscriptions
		Scan(&userSubscribes).Error; err != nil {
		l.Errorw("failed to query user subscribes", logger.Field("error", err.Error()))
		return 0, err
	}
	if len(userSubscribes) == 0 {
		l.Infow("no valid and unlocked user subscribes found")
		return 0, nil
	}
	l.Infow("found user subscribes for traffic-based grouping", logger.Field("count", len(userSubscribes)))
	// 3. Assign node group ids to subscriptions by traffic range.
	affectedCount := 0
	groupUserCount := make(map[int64]int) // node_group_id -> user_count
	for _, us := range userSubscribes {
		// Convert bytes to GB.
		usedTrafficGB := float64(us.UsedTraffic) / (1024 * 1024 * 1024)
		// Find the first matching range (half-open interval [Min, Max)).
		var targetNodeGroupId int64 = 0
		for _, ng := range nodeGroups {
			if ng.MinTrafficGB == nil || ng.MaxTrafficGB == nil {
				continue
			}
			minTraffic := float64(*ng.MinTrafficGB)
			maxTraffic := float64(*ng.MaxTrafficGB)
			// In range when min <= used < max.
			if usedTrafficGB >= minTraffic && usedTrafficGB < maxTraffic {
				targetNodeGroupId = ng.Id
				break
			}
		}
		// No matching range keeps targetNodeGroupId at 0 (no group assigned).
		// Persist the assignment on user_subscribe.node_group_id.
		if err := tx.Model(&user.Subscribe{}).
			Where("id = ?", us.Id).
			Update("node_group_id", targetNodeGroupId).Error; err != nil {
			l.Errorw("failed to update user subscribe node_group_id",
				logger.Field("user_subscribe_id", us.Id),
				logger.Field("target_node_group_id", targetNodeGroupId),
				logger.Field("error", err.Error()))
			continue
		}
		// Only users that actually got a group are recorded in the history.
		if targetNodeGroupId > 0 {
			// Resolve the user's email for the history record.
			email := l.getUserEmail(tx, us.UserId)
			userInfo := UserInfo{
				Id:    us.UserId,
				Email: email,
			}
			groupUsersMap[targetNodeGroupId] = append(groupUsersMap[targetNodeGroupId], userInfo)
			groupUserCount[targetNodeGroupId]++
			l.Debugf("assigned user subscribe %d (traffic: %.2fGB) to node group %d",
				us.Id, usedTrafficGB, targetNodeGroupId)
		} else {
			l.Debugf("user subscribe %d (traffic: %.2fGB) not assigned to any node group",
				us.Id, usedTrafficGB)
		}
		affectedCount++
	}
	l.Infof("traffic-based grouping completed: affected_subscribes=%d", affectedCount)
	// 4. Write one GroupHistoryDetail row per populated node group.
	// NOTE(review): unlike the average/subscribe algorithms, NodeCount here is
	// hard-coded to 1 per group instead of counted via JSON_CONTAINS on the
	// node table — confirm whether this is intentional.
	nodeGroupCount := make(map[int64]int) // node_group_id -> node_count
	for _, ng := range nodeGroups {
		nodeGroupCount[ng.Id] = 1 // each node group counted as 1
	}
	for nodeGroupId, userCount := range groupUserCount {
		userDataJSON, err := json.Marshal(groupUsersMap[nodeGroupId])
		if err != nil {
			l.Errorw("failed to marshal user data",
				logger.Field("node_group_id", nodeGroupId),
				logger.Field("error", err.Error()))
			continue
		}
		detail := group.GroupHistoryDetail{
			HistoryId:   historyId,
			NodeGroupId: nodeGroupId,
			UserCount:   userCount,
			NodeCount:   nodeGroupCount[nodeGroupId],
			UserData:    string(userDataJSON),
		}
		if err := tx.Create(&detail).Error; err != nil {
			l.Errorw("failed to create group history detail",
				logger.Field("history_id", historyId),
				logger.Field("node_group_id", nodeGroupId),
				logger.Field("error", err.Error()))
		}
	}
	return affectedCount, nil
}
// containsIgnoreCase checks if a string contains another substring (case-insensitive)
func containsIgnoreCase(s, substr string) bool {
if len(substr) == 0 {
return true
}
if len(s) < len(substr) {
return false
}
// Simple case-insensitive contains check
sLower := toLower(s)
substrLower := toLower(substr)
return contains(sLower, substrLower)
}
// toLower returns s with ASCII uppercase letters ('A'-'Z') converted to
// lowercase; all other runes (including non-ASCII) pass through unchanged.
//
// Bug fix: the previous implementation allocated a []rune of len(s) (a BYTE
// count) and assigned into it at the byte offsets yielded by range-over-string,
// which left NUL runes in the result for any input containing multi-byte
// UTF-8 characters. Building the result by appending runes avoids that.
func toLower(s string) string {
	result := make([]rune, 0, len(s))
	for _, r := range s {
		if r >= 'A' && r <= 'Z' {
			r += 'a' - 'A'
		}
		result = append(result, r)
	}
	return string(result)
}
// contains reports whether substr occurs within s (case-sensitive).
func contains(s, substr string) bool {
	// indexOf already returns -1 when substr is longer than s, so a single
	// search covers the length check too.
	return indexOf(s, substr) >= 0
}
// indexOf returns the byte index of the first occurrence of substr in s,
// or -1 when substr does not occur. The empty substring matches at index 0.
func indexOf(s, substr string) int {
	n := len(substr)
	switch {
	case n == 0:
		return 0
	case n > len(s):
		return -1
	}
	// Naive scan: compare substr against every window of length n.
	last := len(s) - n
	for i := 0; i <= last; i++ {
		if s[i:i+n] == substr {
			return i
		}
	}
	return -1
}

View File

@ -0,0 +1,82 @@
package group
import (
"context"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/node"
"github.com/perfect-panel/server/internal/model/subscribe"
"github.com/perfect-panel/server/internal/model/system"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/pkg/logger"
)
// ResetGroupsLogic carries the dependencies for the admin endpoint that
// wipes all grouping data.
type ResetGroupsLogic struct {
	logger.Logger // request-scoped structured logger
	ctx           context.Context
	svcCtx        *svc.ServiceContext // shared service dependencies (DB, config, ...)
}
// NewResetGroupsLogic constructs a ResetGroupsLogic bound to the given
// request context and service context.
func NewResetGroupsLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ResetGroupsLogic {
	l := &ResetGroupsLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
	}
	l.Logger = logger.WithContext(ctx)
	return l
}
// ResetGroups wipes the whole grouping feature: it deletes every node group,
// clears node_group_ids on all subscribe plans and nodes, purges the group
// run history and its details (both best effort), and removes all
// `category` = 'group' rows from the system settings table.
//
// NOTE(review): user_subscribe.node_group_id assignments are not cleared
// here — confirm whether that is intentional.
func (l *ResetGroupsLogic) ResetGroups() error {
	// 1. Delete all node groups.
	err := l.svcCtx.DB.Where("1 = 1").Delete(&group.NodeGroup{}).Error
	if err != nil {
		l.Errorw("Failed to delete all node groups", logger.Field("error", err.Error()))
		return err
	}
	l.Infow("Successfully deleted all node groups")
	// 2. Clear node_group_ids for all subscribes (products).
	err = l.svcCtx.DB.Model(&subscribe.Subscribe{}).Where("1 = 1").Update("node_group_ids", "[]").Error
	if err != nil {
		l.Errorw("Failed to clear subscribes' node_group_ids", logger.Field("error", err.Error()))
		return err
	}
	l.Infow("Successfully cleared all subscribes' node_group_ids")
	// 3. Clear node_group_ids for all nodes.
	err = l.svcCtx.DB.Model(&node.Node{}).Where("1 = 1").Update("node_group_ids", "[]").Error
	if err != nil {
		l.Errorw("Failed to clear nodes' node_group_ids", logger.Field("error", err.Error()))
		return err
	}
	l.Infow("Successfully cleared all nodes' node_group_ids")
	// 4. Clear group history.
	err = l.svcCtx.DB.Where("1 = 1").Delete(&group.GroupHistory{}).Error
	if err != nil {
		l.Errorw("Failed to clear group history", logger.Field("error", err.Error()))
		// Non-critical error, continue anyway
	} else {
		l.Infow("Successfully cleared group history")
	}
	// 5. Clear group history details. (Comment was previously mislabeled "7".)
	err = l.svcCtx.DB.Where("1 = 1").Delete(&group.GroupHistoryDetail{}).Error
	if err != nil {
		l.Errorw("Failed to clear group history details", logger.Field("error", err.Error()))
		// Non-critical error, continue anyway
	} else {
		l.Infow("Successfully cleared group history details")
	}
	// 6. Delete all group config settings. (Comment was previously mislabeled "5".)
	err = l.svcCtx.DB.Where("`category` = ?", "group").Delete(&system.System{}).Error
	if err != nil {
		l.Errorw("Failed to delete group config", logger.Field("error", err.Error()))
		return err
	}
	l.Infow("Successfully deleted all group config settings")
	l.Infow("Group reset completed successfully")
	return nil
}

View File

@ -0,0 +1,188 @@
package group
import (
"context"
"encoding/json"
"github.com/perfect-panel/server/internal/model/system"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"github.com/pkg/errors"
"gorm.io/gorm"
)
// UpdateGroupConfigLogic carries the dependencies for the admin endpoint
// that updates the group feature configuration.
type UpdateGroupConfigLogic struct {
	logger.Logger // request-scoped structured logger
	ctx           context.Context
	svcCtx        *svc.ServiceContext // shared service dependencies (DB, config, ...)
}
// NewUpdateGroupConfigLogic constructs an UpdateGroupConfigLogic bound to
// the given request context and service context.
func NewUpdateGroupConfigLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateGroupConfigLogic {
	l := &UpdateGroupConfigLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
	}
	l.Logger = logger.WithContext(ctx)
	return l
}
func (l *UpdateGroupConfigLogic) UpdateGroupConfig(req *types.UpdateGroupConfigRequest) error {
// 验证 mode 是否为合法值
if req.Mode != "" {
if req.Mode != "average" && req.Mode != "subscribe" && req.Mode != "traffic" {
return errors.New("invalid mode, must be one of: average, subscribe, traffic")
}
}
// 使用 GORM Transaction 更新配置
err := l.svcCtx.DB.Transaction(func(tx *gorm.DB) error {
// 更新 enabled 配置(使用 Upsert 逻辑)
enabledValue := "false"
if req.Enabled {
enabledValue = "true"
}
result := tx.Model(&system.System{}).
Where("`category` = 'group' and `key` = ?", "enabled").
Update("value", enabledValue)
if result.Error != nil {
l.Errorw("failed to update group enabled config", logger.Field("error", result.Error.Error()))
return result.Error
}
// 如果没有更新任何行,说明记录不存在,需要插入
if result.RowsAffected == 0 {
if err := tx.Create(&system.System{
Category: "group",
Key: "enabled",
Value: enabledValue,
Desc: "Group Feature Enabled",
}).Error; err != nil {
l.Errorw("failed to create group enabled config", logger.Field("error", err.Error()))
return err
}
}
// 更新 mode 配置(使用 Upsert 逻辑)
if req.Mode != "" {
result := tx.Model(&system.System{}).
Where("`category` = 'group' and `key` = ?", "mode").
Update("value", req.Mode)
if result.Error != nil {
l.Errorw("failed to update group mode config", logger.Field("error", result.Error.Error()))
return result.Error
}
// 如果没有更新任何行,说明记录不存在,需要插入
if result.RowsAffected == 0 {
if err := tx.Create(&system.System{
Category: "group",
Key: "mode",
Value: req.Mode,
Desc: "Group Mode",
}).Error; err != nil {
l.Errorw("failed to create group mode config", logger.Field("error", err.Error()))
return err
}
}
}
// 更新 JSON 配置
if req.Config != nil {
// 更新 average_config
if averageConfig, ok := req.Config["average_config"]; ok {
jsonBytes, err := json.Marshal(averageConfig)
if err != nil {
l.Errorw("failed to marshal average_config", logger.Field("error", err.Error()))
return errors.Wrap(err, "failed to marshal average_config")
}
// 使用 Upsert 逻辑:先尝试 UPDATE如果不存在则 INSERT
result := tx.Model(&system.System{}).
Where("`category` = 'group' and `key` = ?", "average_config").
Update("value", string(jsonBytes))
if result.Error != nil {
l.Errorw("failed to update group average_config", logger.Field("error", result.Error.Error()))
return result.Error
}
// 如果没有更新任何行,说明记录不存在,需要插入
if result.RowsAffected == 0 {
if err := tx.Create(&system.System{
Category: "group",
Key: "average_config",
Value: string(jsonBytes),
Desc: "Average Group Config",
}).Error; err != nil {
l.Errorw("failed to create group average_config", logger.Field("error", err.Error()))
return err
}
}
}
// 更新 subscribe_config
if subscribeConfig, ok := req.Config["subscribe_config"]; ok {
jsonBytes, err := json.Marshal(subscribeConfig)
if err != nil {
l.Errorw("failed to marshal subscribe_config", logger.Field("error", err.Error()))
return errors.Wrap(err, "failed to marshal subscribe_config")
}
// 使用 Upsert 逻辑:先尝试 UPDATE如果不存在则 INSERT
result := tx.Model(&system.System{}).
Where("`category` = 'group' and `key` = ?", "subscribe_config").
Update("value", string(jsonBytes))
if result.Error != nil {
l.Errorw("failed to update group subscribe_config", logger.Field("error", result.Error.Error()))
return result.Error
}
// 如果没有更新任何行,说明记录不存在,需要插入
if result.RowsAffected == 0 {
if err := tx.Create(&system.System{
Category: "group",
Key: "subscribe_config",
Value: string(jsonBytes),
Desc: "Subscribe Group Config",
}).Error; err != nil {
l.Errorw("failed to create group subscribe_config", logger.Field("error", err.Error()))
return err
}
}
}
// 更新 traffic_config
if trafficConfig, ok := req.Config["traffic_config"]; ok {
jsonBytes, err := json.Marshal(trafficConfig)
if err != nil {
l.Errorw("failed to marshal traffic_config", logger.Field("error", err.Error()))
return errors.Wrap(err, "failed to marshal traffic_config")
}
// 使用 Upsert 逻辑:先尝试 UPDATE如果不存在则 INSERT
result := tx.Model(&system.System{}).
Where("`category` = 'group' and `key` = ?", "traffic_config").
Update("value", string(jsonBytes))
if result.Error != nil {
l.Errorw("failed to update group traffic_config", logger.Field("error", result.Error.Error()))
return result.Error
}
// 如果没有更新任何行,说明记录不存在,需要插入
if result.RowsAffected == 0 {
if err := tx.Create(&system.System{
Category: "group",
Key: "traffic_config",
Value: string(jsonBytes),
Desc: "Traffic Group Config",
}).Error; err != nil {
l.Errorw("failed to create group traffic_config", logger.Field("error", err.Error()))
return err
}
}
}
}
return nil
})
if err != nil {
l.Errorw("failed to update group config", logger.Field("error", err.Error()))
return err
}
l.Infof("group config updated successfully: enabled=%v, mode=%s", req.Enabled, req.Mode)
return nil
}

View File

@ -0,0 +1,185 @@
package group
import (
"context"
"errors"
"time"
"github.com/perfect-panel/server/internal/model/group"
"github.com/perfect-panel/server/internal/model/subscribe"
"github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types"
"github.com/perfect-panel/server/pkg/logger"
"gorm.io/gorm"
)
// UpdateNodeGroupLogic handles admin requests that modify an existing node
// group. One instance is built per request via NewUpdateNodeGroupLogic.
type UpdateNodeGroupLogic struct {
	logger.Logger                     // embedded request-scoped logger
	ctx    context.Context            // request context
	svcCtx *svc.ServiceContext        // shared service dependencies (DB, models, config)
}
// NewUpdateNodeGroupLogic constructs an UpdateNodeGroupLogic bound to the
// given request context and service context.
func NewUpdateNodeGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateNodeGroupLogic {
	l := &UpdateNodeGroupLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
	}
	l.Logger = logger.WithContext(ctx)
	return l
}
// UpdateNodeGroup updates an existing node group identified by req.Id.
//
// Validations performed before writing:
//   - the node group must exist;
//   - at most one expired node group may exist system-wide;
//   - a group referenced as the default node group of any subscription
//     product cannot be marked as the expired group;
//   - the resulting traffic range must not overlap another group's range
//     (see validateTrafficRange).
//
// Only fields explicitly provided in the request are written. Name,
// Description and Sort use their zero value as the "not provided" marker,
// so callers cannot reset those particular fields to "" / 0 through this
// endpoint — NOTE(review): confirm this is the intended API contract.
func (l *UpdateNodeGroupLogic) UpdateNodeGroup(req *types.UpdateNodeGroupRequest) error {
	// Make sure the target node group exists before doing anything else.
	var nodeGroup group.NodeGroup
	if err := l.svcCtx.DB.Where("id = ?", req.Id).First(&nodeGroup).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return errors.New("node group not found")
		}
		l.Errorf("failed to find node group: %v", err)
		return err
	}
	// Only one expired node group may exist in the whole system.
	if req.IsExpiredGroup != nil && *req.IsExpiredGroup {
		var count int64
		err := l.svcCtx.DB.Model(&group.NodeGroup{}).
			Where("is_expired_group = ? AND id != ?", true, req.Id).
			Count(&count).Error
		if err != nil {
			l.Errorf("failed to check expired group count: %v", err)
			return err
		}
		if count > 0 {
			return errors.New("system already has an expired node group, cannot create multiple")
		}
		// A group used as the default node group of a subscription product
		// must not become the expired group.
		var subscribeCount int64
		err = l.svcCtx.DB.Model(&subscribe.Subscribe{}).
			Where("node_group_id = ?", req.Id).
			Count(&subscribeCount).Error
		if err != nil {
			l.Errorf("failed to check subscribe usage: %v", err)
			return err
		}
		if subscribeCount > 0 {
			return errors.New("this node group is used as default node group in subscription products, cannot set as expired group")
		}
	}
	// Build the column map; only fields present in the request are included.
	updates := map[string]interface{}{
		"updated_at": time.Now(),
	}
	if req.Name != "" {
		updates["name"] = req.Name
	}
	if req.Description != "" {
		updates["description"] = req.Description
	}
	if req.Sort != 0 {
		updates["sort"] = req.Sort
	}
	if req.ForCalculation != nil {
		updates["for_calculation"] = *req.ForCalculation
	}
	if req.IsExpiredGroup != nil {
		updates["is_expired_group"] = *req.IsExpiredGroup
		// An expired group never takes part in group calculation.
		if *req.IsExpiredGroup {
			updates["for_calculation"] = false
		}
	}
	if req.ExpiredDaysLimit != nil {
		updates["expired_days_limit"] = *req.ExpiredDaysLimit
	}
	if req.MaxTrafficGBExpired != nil {
		updates["max_traffic_gb_expired"] = *req.MaxTrafficGBExpired
	}
	if req.SpeedLimit != nil {
		updates["speed_limit"] = *req.SpeedLimit
	}
	// Resolve the effective traffic range: request values override the
	// currently stored ones.
	newMinTraffic := nodeGroup.MinTrafficGB
	newMaxTraffic := nodeGroup.MaxTrafficGB
	if req.MinTrafficGB != nil {
		newMinTraffic = req.MinTrafficGB
		updates["min_traffic_gb"] = *req.MinTrafficGB
	}
	if req.MaxTrafficGB != nil {
		newMaxTraffic = req.MaxTrafficGB
		updates["max_traffic_gb"] = *req.MaxTrafficGB
	}
	// Validate the resulting range against every other group's range.
	if err := l.validateTrafficRange(int(req.Id), newMinTraffic, newMaxTraffic); err != nil {
		return err
	}
	// Apply the update.
	if err := l.svcCtx.DB.Model(&nodeGroup).Updates(updates).Error; err != nil {
		l.Errorf("failed to update node group: %v", err)
		return err
	}
	l.Infof("updated node group: id=%d", req.Id)
	return nil
}
// validateTrafficRange 校验流量区间:不能重叠、不能留空档、最小值不能大于最大值
func (l *UpdateNodeGroupLogic) validateTrafficRange(currentNodeGroupId int, newMin, newMax *int64) error {
// 处理指针值
minVal := int64(0)
maxVal := int64(0)
if newMin != nil {
minVal = *newMin
}
if newMax != nil {
maxVal = *newMax
}
// 检查最小值是否大于最大值
if minVal > maxVal {
return errors.New("minimum traffic cannot exceed maximum traffic")
}
// 如果两个值都为0表示不参与流量分组不需要校验
if minVal == 0 && maxVal == 0 {
return nil
}
// 查询所有其他设置了流量区间的节点组
var otherGroups []group.NodeGroup
if err := l.svcCtx.DB.
Where("id != ?", currentNodeGroupId).
Where("(min_traffic_gb > 0 OR max_traffic_gb > 0)").
Find(&otherGroups).Error; err != nil {
logger.Errorf("failed to query other node groups: %v", err)
return err
}
// 检查是否有重叠
for _, other := range otherGroups {
otherMin := int64(0)
otherMax := int64(0)
if other.MinTrafficGB != nil {
otherMin = *other.MinTrafficGB
}
if other.MaxTrafficGB != nil {
otherMax = *other.MaxTrafficGB
}
// 如果对方也没设置区间,跳过
if otherMin == 0 && otherMax == 0 {
continue
}
// 检查是否有重叠: 如果两个区间相交,就是重叠
// 不重叠的条件是: newMax <= otherMin OR newMin >= otherMax
if !(maxVal <= otherMin || minVal >= otherMax) {
return errors.New("traffic range overlaps with another node group")
}
}
return nil
}

View File

@ -29,13 +29,14 @@ func NewCreateNodeLogic(ctx context.Context, svcCtx *svc.ServiceContext) *Create
func (l *CreateNodeLogic) CreateNode(req *types.CreateNodeRequest) error { func (l *CreateNodeLogic) CreateNode(req *types.CreateNodeRequest) error {
data := node.Node{ data := node.Node{
Name: req.Name, Name: req.Name,
Tags: tool.StringSliceToString(req.Tags), Tags: tool.StringSliceToString(req.Tags),
Enabled: req.Enabled, Enabled: req.Enabled,
Port: req.Port, Port: req.Port,
Address: req.Address, Address: req.Address,
ServerId: req.ServerId, ServerId: req.ServerId,
Protocol: req.Protocol, Protocol: req.Protocol,
NodeGroupIds: node.JSONInt64Slice(req.NodeGroupIds),
} }
err := l.svcCtx.NodeModel.InsertNode(l.ctx, &data) err := l.svcCtx.NodeModel.InsertNode(l.ctx, &data)
if err != nil { if err != nil {

View File

@ -29,10 +29,17 @@ func NewFilterNodeListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *Fi
} }
func (l *FilterNodeListLogic) FilterNodeList(req *types.FilterNodeListRequest) (resp *types.FilterNodeListResponse, err error) { func (l *FilterNodeListLogic) FilterNodeList(req *types.FilterNodeListRequest) (resp *types.FilterNodeListResponse, err error) {
// Convert NodeGroupId to []int64 for model
var nodeGroupIds []int64
if req.NodeGroupId != nil {
nodeGroupIds = []int64{*req.NodeGroupId}
}
total, data, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ total, data, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{
Page: req.Page, Page: req.Page,
Size: req.Size, Size: req.Size,
Search: req.Search, Search: req.Search,
NodeGroupIds: nodeGroupIds,
}) })
if err != nil { if err != nil {
@ -43,17 +50,18 @@ func (l *FilterNodeListLogic) FilterNodeList(req *types.FilterNodeListRequest) (
list := make([]types.Node, 0) list := make([]types.Node, 0)
for _, datum := range data { for _, datum := range data {
list = append(list, types.Node{ list = append(list, types.Node{
Id: datum.Id, Id: datum.Id,
Name: datum.Name, Name: datum.Name,
Tags: tool.RemoveDuplicateElements(strings.Split(datum.Tags, ",")...), Tags: tool.RemoveDuplicateElements(strings.Split(datum.Tags, ",")...),
Port: datum.Port, Port: datum.Port,
Address: datum.Address, Address: datum.Address,
ServerId: datum.ServerId, ServerId: datum.ServerId,
Protocol: datum.Protocol, Protocol: datum.Protocol,
Enabled: datum.Enabled, Enabled: datum.Enabled,
Sort: datum.Sort, Sort: datum.Sort,
CreatedAt: datum.CreatedAt.UnixMilli(), NodeGroupIds: []int64(datum.NodeGroupIds),
UpdatedAt: datum.UpdatedAt.UnixMilli(), CreatedAt: datum.CreatedAt.UnixMilli(),
UpdatedAt: datum.UpdatedAt.UnixMilli(),
}) })
} }

View File

@ -40,6 +40,7 @@ func (l *UpdateNodeLogic) UpdateNode(req *types.UpdateNodeRequest) error {
data.Address = req.Address data.Address = req.Address
data.Protocol = req.Protocol data.Protocol = req.Protocol
data.Enabled = req.Enabled data.Enabled = req.Enabled
data.NodeGroupIds = node.JSONInt64Slice(req.NodeGroupIds)
err = l.svcCtx.NodeModel.UpdateNode(l.ctx, data) err = l.svcCtx.NodeModel.UpdateNode(l.ctx, data)
if err != nil { if err != nil {
l.Errorw("[UpdateNode] Update Database Error: ", logger.Field("error", err.Error())) l.Errorw("[UpdateNode] Update Database Error: ", logger.Field("error", err.Error()))

View File

@ -34,6 +34,12 @@ func (l *CreateSubscribeLogic) CreateSubscribe(req *types.CreateSubscribeRequest
val, _ := json.Marshal(req.Discount) val, _ := json.Marshal(req.Discount)
discount = string(val) discount = string(val)
} }
trafficLimit := ""
if len(req.TrafficLimit) > 0 {
val, _ := json.Marshal(req.TrafficLimit)
trafficLimit = string(val)
}
sub := &subscribe.Subscribe{ sub := &subscribe.Subscribe{
Id: 0, Id: 0,
Name: req.Name, Name: req.Name,
@ -51,6 +57,9 @@ func (l *CreateSubscribeLogic) CreateSubscribe(req *types.CreateSubscribeRequest
NewUserOnly: req.NewUserOnly, NewUserOnly: req.NewUserOnly,
Nodes: tool.Int64SliceToString(req.Nodes), Nodes: tool.Int64SliceToString(req.Nodes),
NodeTags: tool.StringSliceToString(req.NodeTags), NodeTags: tool.StringSliceToString(req.NodeTags),
NodeGroupIds: subscribe.JSONInt64Slice(req.NodeGroupIds),
NodeGroupId: req.NodeGroupId,
TrafficLimit: trafficLimit,
Show: req.Show, Show: req.Show,
Sell: req.Sell, Sell: req.Sell,
Sort: 0, Sort: 0,

View File

@ -42,6 +42,12 @@ func (l *GetSubscribeDetailsLogic) GetSubscribeDetails(req *types.GetSubscribeDe
l.Logger.Error("[GetSubscribeDetailsLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("discount", sub.Discount)) l.Logger.Error("[GetSubscribeDetailsLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("discount", sub.Discount))
} }
} }
if sub.TrafficLimit != "" {
err = json.Unmarshal([]byte(sub.TrafficLimit), &resp.TrafficLimit)
if err != nil {
l.Logger.Error("[GetSubscribeDetailsLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("traffic_limit", sub.TrafficLimit))
}
}
resp.Nodes = tool.StringToInt64Slice(sub.Nodes) resp.Nodes = tool.StringToInt64Slice(sub.Nodes)
resp.NodeTags = strings.Split(sub.NodeTags, ",") resp.NodeTags = strings.Split(sub.NodeTags, ",")
return resp, nil return resp, nil

View File

@ -30,12 +30,20 @@ func NewGetSubscribeListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *
} }
func (l *GetSubscribeListLogic) GetSubscribeList(req *types.GetSubscribeListRequest) (resp *types.GetSubscribeListResponse, err error) { func (l *GetSubscribeListLogic) GetSubscribeList(req *types.GetSubscribeListRequest) (resp *types.GetSubscribeListResponse, err error) {
total, list, err := l.svcCtx.SubscribeModel.FilterList(l.ctx, &subscribe.FilterParams{ // Build filter params
filterParams := &subscribe.FilterParams{
Page: int(req.Page), Page: int(req.Page),
Size: int(req.Size), Size: int(req.Size),
Language: req.Language, Language: req.Language,
Search: req.Search, Search: req.Search,
}) }
// Add NodeGroupId filter if provided
if req.NodeGroupId > 0 {
filterParams.NodeGroupId = &req.NodeGroupId
}
total, list, err := l.svcCtx.SubscribeModel.FilterList(l.ctx, filterParams)
if err != nil { if err != nil {
l.Logger.Error("[GetSubscribeListLogic] get subscribe list failed: ", logger.Field("error", err.Error())) l.Logger.Error("[GetSubscribeListLogic] get subscribe list failed: ", logger.Field("error", err.Error()))
return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "get subscribe list failed: %v", err.Error()) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "get subscribe list failed: %v", err.Error())
@ -54,8 +62,22 @@ func (l *GetSubscribeListLogic) GetSubscribeList(req *types.GetSubscribeListRequ
l.Logger.Error("[GetSubscribeListLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("discount", item.Discount)) l.Logger.Error("[GetSubscribeListLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("discount", item.Discount))
} }
} }
if item.TrafficLimit != "" {
err = json.Unmarshal([]byte(item.TrafficLimit), &sub.TrafficLimit)
if err != nil {
l.Logger.Error("[GetSubscribeListLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("traffic_limit", item.TrafficLimit))
}
}
sub.Nodes = tool.StringToInt64Slice(item.Nodes) sub.Nodes = tool.StringToInt64Slice(item.Nodes)
sub.NodeTags = strings.Split(item.NodeTags, ",") sub.NodeTags = strings.Split(item.NodeTags, ",")
// Handle NodeGroupIds - convert from JSONInt64Slice to []int64
if item.NodeGroupIds != nil {
sub.NodeGroupIds = []int64(item.NodeGroupIds)
} else {
sub.NodeGroupIds = []int64{}
}
// NodeGroupId is already int64, should be copied by DeepCopy
sub.NodeGroupId = item.NodeGroupId
resultList = append(resultList, sub) resultList = append(resultList, sub)
} }

View File

@ -42,6 +42,12 @@ func (l *UpdateSubscribeLogic) UpdateSubscribe(req *types.UpdateSubscribeRequest
val, _ := json.Marshal(req.Discount) val, _ := json.Marshal(req.Discount)
discount = string(val) discount = string(val)
} }
trafficLimit := ""
if len(req.TrafficLimit) > 0 {
val, _ := json.Marshal(req.TrafficLimit)
trafficLimit = string(val)
}
sub := &subscribe.Subscribe{ sub := &subscribe.Subscribe{
Id: req.Id, Id: req.Id,
Name: req.Name, Name: req.Name,
@ -59,6 +65,9 @@ func (l *UpdateSubscribeLogic) UpdateSubscribe(req *types.UpdateSubscribeRequest
NewUserOnly: req.NewUserOnly, NewUserOnly: req.NewUserOnly,
Nodes: tool.Int64SliceToString(req.Nodes), Nodes: tool.Int64SliceToString(req.Nodes),
NodeTags: tool.StringSliceToString(req.NodeTags), NodeTags: tool.StringSliceToString(req.NodeTags),
NodeGroupIds: subscribe.JSONInt64Slice(req.NodeGroupIds),
NodeGroupId: req.NodeGroupId,
TrafficLimit: trafficLimit,
Show: req.Show, Show: req.Show,
Sell: req.Sell, Sell: req.Sell,
Sort: req.Sort, Sort: req.Sort,

View File

@ -6,6 +6,7 @@ import (
"time" "time"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/perfect-panel/server/internal/logic/admin/group"
"github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/model/user"
"github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/svc"
"github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/types"
@ -64,6 +65,7 @@ func (l *CreateUserSubscribeLogic) CreateUserSubscribe(req *types.CreateUserSubs
Upload: 0, Upload: 0,
Token: uuidx.SubscribeToken(fmt.Sprintf("adminCreate:%d", time.Now().UnixMilli())), Token: uuidx.SubscribeToken(fmt.Sprintf("adminCreate:%d", time.Now().UnixMilli())),
UUID: uuid.New().String(), UUID: uuid.New().String(),
NodeGroupId: sub.NodeGroupId,
Status: 1, Status: 1,
} }
if err = l.svcCtx.UserModel.InsertSubscribe(l.ctx, &userSub); err != nil { if err = l.svcCtx.UserModel.InsertSubscribe(l.ctx, &userSub); err != nil {
@ -71,6 +73,60 @@ func (l *CreateUserSubscribeLogic) CreateUserSubscribe(req *types.CreateUserSubs
return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseInsertError), "InsertSubscribe error: %v", err.Error()) return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseInsertError), "InsertSubscribe error: %v", err.Error())
} }
// Trigger user group recalculation (runs in background)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Check if group management is enabled
var groupEnabled string
err := l.svcCtx.DB.Table("system").
Where("`category` = ? AND `key` = ?", "group", "enabled").
Select("value").
Scan(&groupEnabled).Error
if err != nil || groupEnabled != "true" && groupEnabled != "1" {
l.Debugf("Group management not enabled, skipping recalculation")
return
}
// Get the configured grouping mode
var groupMode string
err = l.svcCtx.DB.Table("system").
Where("`category` = ? AND `key` = ?", "group", "mode").
Select("value").
Scan(&groupMode).Error
if err != nil {
l.Errorw("Failed to get group mode", logger.Field("error", err.Error()))
return
}
// Validate group mode
if groupMode != "average" && groupMode != "subscribe" && groupMode != "traffic" {
l.Debugf("Invalid group mode (current: %s), skipping", groupMode)
return
}
// Trigger group recalculation with the configured mode
logic := group.NewRecalculateGroupLogic(ctx, l.svcCtx)
req := &types.RecalculateGroupRequest{
Mode: groupMode,
}
if err := logic.RecalculateGroup(req); err != nil {
l.Errorw("Failed to recalculate user group",
logger.Field("user_id", userInfo.Id),
logger.Field("error", err.Error()),
)
return
}
l.Infow("Successfully recalculated user group after admin created subscription",
logger.Field("user_id", userInfo.Id),
logger.Field("subscribe_id", userSub.Id),
logger.Field("mode", groupMode),
)
}()
err = l.svcCtx.UserModel.UpdateUserCache(l.ctx, userInfo) err = l.svcCtx.UserModel.UpdateUserCache(l.ctx, userInfo)
if err != nil { if err != nil {
l.Errorw("UpdateUserCache error", logger.Field("error", err.Error())) l.Errorw("UpdateUserCache error", logger.Field("error", err.Error()))
@ -81,5 +137,6 @@ func (l *CreateUserSubscribeLogic) CreateUserSubscribe(req *types.CreateUserSubs
if err != nil { if err != nil {
logger.Errorw("ClearSubscribe error", logger.Field("error", err.Error())) logger.Errorw("ClearSubscribe error", logger.Field("error", err.Error()))
} }
return nil return nil
} }

Some files were not shown because too many files have changed in this diff Show More