diff --git a/.agents/skills/agentdb-advanced/SKILL.md b/.agents/skills/agentdb-advanced/SKILL.md new file mode 100644 index 0000000..da61dc2 --- /dev/null +++ b/.agents/skills/agentdb-advanced/SKILL.md @@ -0,0 +1,550 @@ +--- +name: "AgentDB Advanced Features" +description: "Master advanced AgentDB features including QUIC synchronization, multi-database management, custom distance metrics, hybrid search, and distributed systems integration. Use when building distributed AI systems, multi-agent coordination, or advanced vector search applications." +--- + +# AgentDB Advanced Features + +## What This Skill Does + +Covers advanced AgentDB capabilities for distributed systems, multi-database coordination, custom distance metrics, hybrid search (vector + metadata), QUIC synchronization, and production deployment patterns. Enables building sophisticated AI systems with sub-millisecond cross-node communication and advanced search capabilities. + +**Performance**: <1ms QUIC sync, hybrid search with filters, custom distance metrics. + +## Prerequisites + +- Node.js 18+ +- AgentDB v1.0.7+ (via agentic-flow) +- Understanding of distributed systems (for QUIC sync) +- Vector search fundamentals + +--- + +## QUIC Synchronization + +### What is QUIC Sync? + +QUIC (Quick UDP Internet Connections) enables sub-millisecond latency synchronization between AgentDB instances across network boundaries with automatic retry, multiplexing, and encryption. 
+ +**Benefits**: +- <1ms latency between nodes +- Multiplexed streams (multiple operations simultaneously) +- Built-in encryption (TLS 1.3) +- Automatic retry and recovery +- Event-based broadcasting + +### Enable QUIC Sync + +```typescript +import { createAgentDBAdapter } from 'agentic-flow/reasoningbank'; + +// Initialize with QUIC synchronization +const adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/distributed.db', + enableQUICSync: true, + syncPort: 4433, + syncPeers: [ + '192.168.1.10:4433', + '192.168.1.11:4433', + '192.168.1.12:4433', + ], +}); + +// Patterns automatically sync across all peers +await adapter.insertPattern({ + // ... pattern data +}); + +// Available on all peers within ~1ms +``` + +### QUIC Configuration + +```typescript +const adapter = await createAgentDBAdapter({ + enableQUICSync: true, + syncPort: 4433, // QUIC server port + syncPeers: ['host1:4433'], // Peer addresses + syncInterval: 1000, // Sync interval (ms) + syncBatchSize: 100, // Patterns per batch + maxRetries: 3, // Retry failed syncs + compression: true, // Enable compression +}); +``` + +### Multi-Node Deployment + +```bash +# Node 1 (192.168.1.10) +AGENTDB_QUIC_SYNC=true \ +AGENTDB_QUIC_PORT=4433 \ +AGENTDB_QUIC_PEERS=192.168.1.11:4433,192.168.1.12:4433 \ +node server.js + +# Node 2 (192.168.1.11) +AGENTDB_QUIC_SYNC=true \ +AGENTDB_QUIC_PORT=4433 \ +AGENTDB_QUIC_PEERS=192.168.1.10:4433,192.168.1.12:4433 \ +node server.js + +# Node 3 (192.168.1.12) +AGENTDB_QUIC_SYNC=true \ +AGENTDB_QUIC_PORT=4433 \ +AGENTDB_QUIC_PEERS=192.168.1.10:4433,192.168.1.11:4433 \ +node server.js +``` + +--- + +## Distance Metrics + +### Cosine Similarity (Default) + +Best for normalized vectors, semantic similarity: + +```bash +# CLI +npx agentdb@latest query ./vectors.db "[0.1,0.2,...]" -m cosine + +# API +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + metric: 'cosine', + k: 10, +}); +``` + +**Use Cases**: +- Text embeddings (BERT, GPT, etc.) 
+- Semantic search +- Document similarity +- Most general-purpose applications + +**Formula**: `cos(θ) = (A · B) / (||A|| × ||B||)` +**Range**: [-1, 1] (1 = identical, -1 = opposite) + +### Euclidean Distance (L2) + +Best for spatial data, geometric similarity: + +```bash +# CLI +npx agentdb@latest query ./vectors.db "[0.1,0.2,...]" -m euclidean + +# API +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + metric: 'euclidean', + k: 10, +}); +``` + +**Use Cases**: +- Image embeddings +- Spatial data +- Computer vision +- When vector magnitude matters + +**Formula**: `d = √(Σ(ai - bi)²)` +**Range**: [0, ∞] (0 = identical, ∞ = very different) + +### Dot Product + +Best for pre-normalized vectors, fast computation: + +```bash +# CLI +npx agentdb@latest query ./vectors.db "[0.1,0.2,...]" -m dot + +# API +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + metric: 'dot', + k: 10, +}); +``` + +**Use Cases**: +- Pre-normalized embeddings +- Fast similarity computation +- When vectors are already unit-length + +**Formula**: `dot = Σ(ai × bi)` +**Range**: [-∞, ∞] (higher = more similar) + +### Custom Distance Metrics + +```typescript +// Implement custom distance function +function customDistance(vec1: number[], vec2: number[]): number { + // Weighted Euclidean distance + const weights = [1.0, 2.0, 1.5, ...]; + let sum = 0; + for (let i = 0; i < vec1.length; i++) { + sum += weights[i] * Math.pow(vec1[i] - vec2[i], 2); + } + return Math.sqrt(sum); +} + +// Use in search (requires custom implementation) +``` + +--- + +## Hybrid Search (Vector + Metadata) + +### Basic Hybrid Search + +Combine vector similarity with metadata filtering: + +```typescript +// Store documents with metadata +await adapter.insertPattern({ + id: '', + type: 'document', + domain: 'research-papers', + pattern_data: JSON.stringify({ + embedding: documentEmbedding, + text: documentText, + metadata: { + author: 'Jane Smith', + year: 2025, + category: 'machine-learning', 
+ citations: 150, + } + }), + confidence: 1.0, + usage_count: 0, + success_count: 0, + created_at: Date.now(), + last_used: Date.now(), +}); + +// Hybrid search: vector similarity + metadata filters +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'research-papers', + k: 20, + filters: { + year: { $gte: 2023 }, // Published 2023 or later + category: 'machine-learning', // ML papers only + citations: { $gte: 50 }, // Highly cited + }, +}); +``` + +### Advanced Filtering + +```typescript +// Complex metadata queries +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'products', + k: 50, + filters: { + price: { $gte: 10, $lte: 100 }, // Price range + category: { $in: ['electronics', 'gadgets'] }, // Multiple categories + rating: { $gte: 4.0 }, // High rated + inStock: true, // Available + tags: { $contains: 'wireless' }, // Has tag + }, +}); +``` + +### Weighted Hybrid Search + +Combine vector and metadata scores: + +```typescript +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'content', + k: 20, + hybridWeights: { + vectorSimilarity: 0.7, // 70% weight on semantic similarity + metadataScore: 0.3, // 30% weight on metadata match + }, + filters: { + category: 'technology', + recency: { $gte: Date.now() - 30 * 24 * 3600000 }, // Last 30 days + }, +}); +``` + +--- + +## Multi-Database Management + +### Multiple Databases + +```typescript +// Separate databases for different domains +const knowledgeDB = await createAgentDBAdapter({ + dbPath: '.agentdb/knowledge.db', +}); + +const conversationDB = await createAgentDBAdapter({ + dbPath: '.agentdb/conversations.db', +}); + +const codeDB = await createAgentDBAdapter({ + dbPath: '.agentdb/code.db', +}); + +// Use appropriate database for each task +await knowledgeDB.insertPattern({ /* knowledge */ }); +await conversationDB.insertPattern({ /* conversation */ }); +await codeDB.insertPattern({ /* code */ }); +``` + +### Database Sharding 
+ +```typescript +// Shard by domain for horizontal scaling +const shards = { + 'domain-a': await createAgentDBAdapter({ dbPath: '.agentdb/shard-a.db' }), + 'domain-b': await createAgentDBAdapter({ dbPath: '.agentdb/shard-b.db' }), + 'domain-c': await createAgentDBAdapter({ dbPath: '.agentdb/shard-c.db' }), +}; + +// Route queries to appropriate shard +function getDBForDomain(domain: string) { + const shardKey = domain.split('-')[0]; // Extract shard key + return shards[shardKey] || shards['domain-a']; +} + +// Insert to correct shard +const db = getDBForDomain('domain-a-task'); +await db.insertPattern({ /* ... */ }); +``` + +--- + +## MMR (Maximal Marginal Relevance) + +Retrieve diverse results to avoid redundancy: + +```typescript +// Without MMR: Similar results may be redundant +const standardResults = await adapter.retrieveWithReasoning(queryEmbedding, { + k: 10, + useMMR: false, +}); + +// With MMR: Diverse, non-redundant results +const diverseResults = await adapter.retrieveWithReasoning(queryEmbedding, { + k: 10, + useMMR: true, + mmrLambda: 0.5, // Balance relevance (0) vs diversity (1) +}); +``` + +**MMR Parameters**: +- `mmrLambda = 0`: Maximum relevance (may be redundant) +- `mmrLambda = 0.5`: Balanced (default) +- `mmrLambda = 1`: Maximum diversity (may be less relevant) + +**Use Cases**: +- Search result diversification +- Recommendation systems +- Avoiding echo chambers +- Exploratory search + +--- + +## Context Synthesis + +Generate rich context from multiple memories: + +```typescript +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'problem-solving', + k: 10, + synthesizeContext: true, // Enable context synthesis +}); + +// ContextSynthesizer creates coherent narrative +console.log('Synthesized Context:', result.context); +// "Based on 10 similar problem-solving attempts, the most effective +// approach involves: 1) analyzing root cause, 2) brainstorming solutions, +// 3) evaluating trade-offs, 4) implementing 
incrementally. Success rate: 85%" + +console.log('Patterns:', result.patterns); +// Extracted common patterns across memories +``` + +--- + +## Production Patterns + +### Connection Pooling + +```typescript +// Singleton pattern for shared adapter +class AgentDBPool { + private static instance: AgentDBAdapter; + + static async getInstance() { + if (!this.instance) { + this.instance = await createAgentDBAdapter({ + dbPath: '.agentdb/production.db', + quantizationType: 'scalar', + cacheSize: 2000, + }); + } + return this.instance; + } +} + +// Use in application +const db = await AgentDBPool.getInstance(); +const results = await db.retrieveWithReasoning(queryEmbedding, { k: 10 }); +``` + +### Error Handling + +```typescript +async function safeRetrieve(queryEmbedding: number[], options: any) { + try { + const result = await adapter.retrieveWithReasoning(queryEmbedding, options); + return result; + } catch (error) { + if (error.code === 'DIMENSION_MISMATCH') { + console.error('Query embedding dimension mismatch'); + // Handle dimension error + } else if (error.code === 'DATABASE_LOCKED') { + // Retry with exponential backoff + await new Promise(resolve => setTimeout(resolve, 100)); + return safeRetrieve(queryEmbedding, options); + } + throw error; + } +} +``` + +### Monitoring and Logging + +```typescript +// Performance monitoring +const startTime = Date.now(); +const result = await adapter.retrieveWithReasoning(queryEmbedding, { k: 10 }); +const latency = Date.now() - startTime; + +if (latency > 100) { + console.warn('Slow query detected:', latency, 'ms'); +} + +// Log statistics +const stats = await adapter.getStats(); +console.log('Database Stats:', { + totalPatterns: stats.totalPatterns, + dbSize: stats.dbSize, + cacheHitRate: stats.cacheHitRate, + avgSearchLatency: stats.avgSearchLatency, +}); +``` + +--- + +## CLI Advanced Operations + +### Database Import/Export + +```bash +# Export with compression +npx agentdb@latest export ./vectors.db ./backup.json.gz 
--compress + +# Import from backup +npx agentdb@latest import ./backup.json.gz --decompress + +# Merge databases +npx agentdb@latest merge ./db1.sqlite ./db2.sqlite ./merged.sqlite +``` + +### Database Optimization + +```bash +# Vacuum database (reclaim space) +sqlite3 .agentdb/vectors.db "VACUUM;" + +# Analyze for query optimization +sqlite3 .agentdb/vectors.db "ANALYZE;" + +# Rebuild indices +npx agentdb@latest reindex ./vectors.db +``` + +--- + +## Environment Variables + +```bash +# AgentDB configuration +AGENTDB_PATH=.agentdb/reasoningbank.db +AGENTDB_ENABLED=true + +# Performance tuning +AGENTDB_QUANTIZATION=binary # binary|scalar|product|none +AGENTDB_CACHE_SIZE=2000 +AGENTDB_HNSW_M=16 +AGENTDB_HNSW_EF=100 + +# Learning plugins +AGENTDB_LEARNING=true + +# Reasoning agents +AGENTDB_REASONING=true + +# QUIC synchronization +AGENTDB_QUIC_SYNC=true +AGENTDB_QUIC_PORT=4433 +AGENTDB_QUIC_PEERS=host1:4433,host2:4433 +``` + +--- + +## Troubleshooting + +### Issue: QUIC sync not working + +```bash +# Check firewall allows UDP port 4433 +sudo ufw allow 4433/udp + +# Verify peers are reachable +ping host1 + +# Check QUIC logs +DEBUG=agentdb:quic node server.js +``` + +### Issue: Hybrid search returns no results + +```typescript +// Relax filters +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + k: 100, // Increase k + filters: { + // Remove or relax filters + }, +}); +``` + +### Issue: Memory consolidation too aggressive + +```typescript +// Disable automatic optimization +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + optimizeMemory: false, // Disable auto-consolidation + k: 10, +}); +``` + +--- + +## Learn More + +- **QUIC Protocol**: docs/quic-synchronization.pdf +- **Hybrid Search**: docs/hybrid-search-guide.md +- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb +- **Website**: https://agentdb.ruv.io + +--- + +**Category**: Advanced / Distributed Systems +**Difficulty**: Advanced 
+**Estimated Time**: 45-60 minutes diff --git a/.agents/skills/agentdb-learning/SKILL.md b/.agents/skills/agentdb-learning/SKILL.md new file mode 100644 index 0000000..874760c --- /dev/null +++ b/.agents/skills/agentdb-learning/SKILL.md @@ -0,0 +1,545 @@ +--- +name: "AgentDB Learning Plugins" +description: "Create and train AI learning plugins with AgentDB's 9 reinforcement learning algorithms. Includes Decision Transformer, Q-Learning, SARSA, Actor-Critic, and more. Use when building self-learning agents, implementing RL, or optimizing agent behavior through experience." +--- + +# AgentDB Learning Plugins + +## What This Skill Does + +Provides access to 9 reinforcement learning algorithms via AgentDB's plugin system. Create, train, and deploy learning plugins for autonomous agents that improve through experience. Includes offline RL (Decision Transformer), value-based learning (Q-Learning), policy gradients (Actor-Critic), and advanced techniques. + +**Performance**: Train models 10-100x faster with WASM-accelerated neural inference. 
+ +## Prerequisites + +- Node.js 18+ +- AgentDB v1.0.7+ (via agentic-flow) +- Basic understanding of reinforcement learning (recommended) + +--- + +## Quick Start with CLI + +### Create Learning Plugin + +```bash +# Interactive wizard +npx agentdb@latest create-plugin + +# Use specific template +npx agentdb@latest create-plugin -t decision-transformer -n my-agent + +# Preview without creating +npx agentdb@latest create-plugin -t q-learning --dry-run + +# Custom output directory +npx agentdb@latest create-plugin -t actor-critic -o ./plugins +``` + +### List Available Templates + +```bash +# Show all plugin templates +npx agentdb@latest list-templates + +# Available templates: +# - decision-transformer (sequence modeling RL - recommended) +# - q-learning (value-based learning) +# - sarsa (on-policy TD learning) +# - actor-critic (policy gradient with baseline) +# - curiosity-driven (exploration-based) +``` + +### Manage Plugins + +```bash +# List installed plugins +npx agentdb@latest list-plugins + +# Get plugin information +npx agentdb@latest plugin-info my-agent + +# Shows: algorithm, configuration, training status +``` + +--- + +## Quick Start with API + +```typescript +import { createAgentDBAdapter } from 'agentic-flow/reasoningbank'; + +// Initialize with learning enabled +const adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/learning.db', + enableLearning: true, // Enable learning plugins + enableReasoning: true, + cacheSize: 1000, +}); + +// Store training experience +await adapter.insertPattern({ + id: '', + type: 'experience', + domain: 'game-playing', + pattern_data: JSON.stringify({ + embedding: await computeEmbedding('state-action-reward'), + pattern: { + state: [0.1, 0.2, 0.3], + action: 2, + reward: 1.0, + next_state: [0.15, 0.25, 0.35], + done: false + } + }), + confidence: 0.9, + usage_count: 1, + success_count: 1, + created_at: Date.now(), + last_used: Date.now(), +}); + +// Train learning model +const metrics = await adapter.train({ + 
epochs: 50, + batchSize: 32, +}); + +console.log('Training Loss:', metrics.loss); +console.log('Duration:', metrics.duration, 'ms'); +``` + +--- + +## Available Learning Algorithms (9 Total) + +### 1. Decision Transformer (Recommended) + +**Type**: Offline Reinforcement Learning +**Best For**: Learning from logged experiences, imitation learning +**Strengths**: No online interaction needed, stable training + +```bash +npx agentdb@latest create-plugin -t decision-transformer -n dt-agent +``` + +**Use Cases**: +- Learn from historical data +- Imitation learning from expert demonstrations +- Safe learning without environment interaction +- Sequence modeling tasks + +**Configuration**: +```json +{ + "algorithm": "decision-transformer", + "model_size": "base", + "context_length": 20, + "embed_dim": 128, + "n_heads": 8, + "n_layers": 6 +} +``` + +### 2. Q-Learning + +**Type**: Value-Based RL (Off-Policy) +**Best For**: Discrete action spaces, sample efficiency +**Strengths**: Proven, simple, works well for small/medium problems + +```bash +npx agentdb@latest create-plugin -t q-learning -n q-agent +``` + +**Use Cases**: +- Grid worlds, board games +- Navigation tasks +- Resource allocation +- Discrete decision-making + +**Configuration**: +```json +{ + "algorithm": "q-learning", + "learning_rate": 0.001, + "gamma": 0.99, + "epsilon": 0.1, + "epsilon_decay": 0.995 +} +``` + +### 3. SARSA + +**Type**: Value-Based RL (On-Policy) +**Best For**: Safe exploration, risk-sensitive tasks +**Strengths**: More conservative than Q-Learning, better for safety + +```bash +npx agentdb@latest create-plugin -t sarsa -n sarsa-agent +``` + +**Use Cases**: +- Safety-critical applications +- Risk-sensitive decision-making +- Online learning with exploration + +**Configuration**: +```json +{ + "algorithm": "sarsa", + "learning_rate": 0.001, + "gamma": 0.99, + "epsilon": 0.1 +} +``` + +### 4. 
Actor-Critic + +**Type**: Policy Gradient with Value Baseline +**Best For**: Continuous actions, variance reduction +**Strengths**: Stable, works for continuous/discrete actions + +```bash +npx agentdb@latest create-plugin -t actor-critic -n ac-agent +``` + +**Use Cases**: +- Continuous control (robotics, simulations) +- Complex action spaces +- Multi-agent coordination + +**Configuration**: +```json +{ + "algorithm": "actor-critic", + "actor_lr": 0.001, + "critic_lr": 0.002, + "gamma": 0.99, + "entropy_coef": 0.01 +} +``` + +### 5. Active Learning + +**Type**: Query-Based Learning +**Best For**: Label-efficient learning, human-in-the-loop +**Strengths**: Minimizes labeling cost, focuses on uncertain samples + +**Use Cases**: +- Human feedback incorporation +- Label-efficient training +- Uncertainty sampling +- Annotation cost reduction + +### 6. Adversarial Training + +**Type**: Robustness Enhancement +**Best For**: Safety, robustness to perturbations +**Strengths**: Improves model robustness, adversarial defense + +**Use Cases**: +- Security applications +- Robust decision-making +- Adversarial defense +- Safety testing + +### 7. Curriculum Learning + +**Type**: Progressive Difficulty Training +**Best For**: Complex tasks, faster convergence +**Strengths**: Stable learning, faster convergence on hard tasks + +**Use Cases**: +- Complex multi-stage tasks +- Hard exploration problems +- Skill composition +- Transfer learning + +### 8. Federated Learning + +**Type**: Distributed Learning +**Best For**: Privacy, distributed data +**Strengths**: Privacy-preserving, scalable + +**Use Cases**: +- Multi-agent systems +- Privacy-sensitive data +- Distributed training +- Collaborative learning + +### 9. 
Multi-Task Learning + +**Type**: Transfer Learning +**Best For**: Related tasks, knowledge sharing +**Strengths**: Faster learning on new tasks, better generalization + +**Use Cases**: +- Task families +- Transfer learning +- Domain adaptation +- Meta-learning + +--- + +## Training Workflow + +### 1. Collect Experiences + +```typescript +// Store experiences during agent execution +for (let i = 0; i < numEpisodes; i++) { + const episode = runEpisode(); + + for (const step of episode.steps) { + await adapter.insertPattern({ + id: '', + type: 'experience', + domain: 'task-domain', + pattern_data: JSON.stringify({ + embedding: await computeEmbedding(JSON.stringify(step)), + pattern: { + state: step.state, + action: step.action, + reward: step.reward, + next_state: step.next_state, + done: step.done + } + }), + confidence: step.reward > 0 ? 0.9 : 0.5, + usage_count: 1, + success_count: step.reward > 0 ? 1 : 0, + created_at: Date.now(), + last_used: Date.now(), + }); + } +} +``` + +### 2. Train Model + +```typescript +// Train on collected experiences +const trainingMetrics = await adapter.train({ + epochs: 100, + batchSize: 64, + learningRate: 0.001, + validationSplit: 0.2, +}); + +console.log('Training Metrics:', trainingMetrics); +// { +// loss: 0.023, +// valLoss: 0.028, +// duration: 1523, +// epochs: 100 +// } +``` + +### 3. 
Evaluate Performance + +```typescript +// Retrieve similar successful experiences +const testQuery = await computeEmbedding(JSON.stringify(testState)); +const result = await adapter.retrieveWithReasoning(testQuery, { + domain: 'task-domain', + k: 10, + synthesizeContext: true, +}); + +// Evaluate action quality +const suggestedAction = result.memories[0].pattern.action; +const confidence = result.memories[0].similarity; + +console.log('Suggested Action:', suggestedAction); +console.log('Confidence:', confidence); +``` + +--- + +## Advanced Training Techniques + +### Experience Replay + +```typescript +// Store experiences in buffer +const replayBuffer = []; + +// Sample random batch of 32 experiences for training +const batch = sampleRandomBatch(replayBuffer, 32); + +// Train on batch +await adapter.train({ + data: batch, + epochs: 1, + batchSize: 32, +}); +``` + +### Prioritized Experience Replay + +```typescript +// Store experiences with priority (TD error) +await adapter.insertPattern({ + // ... standard fields + confidence: tdError, // Use TD error as confidence/priority + // ... +}); + +// Retrieve high-priority experiences +const highPriority = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'task-domain', + k: 32, + minConfidence: 0.7, // Only high TD-error experiences +}); +``` + +### Multi-Agent Training + +```typescript +// Collect experiences from multiple agents +for (const agent of agents) { + const experience = await agent.step(); + + await adapter.insertPattern({ + // ... store experience with agent ID + domain: `multi-agent/${agent.id}`, + }); +} + +// Train shared model +await adapter.train({ + epochs: 50, + batchSize: 64, +}); +``` + +--- + +## Performance Optimization + +### Batch Training + +```typescript +// Collect a batch of 1000 experiences +const experiences = collectBatch(1000); + +// Batch insert (500x faster) +for (const exp of experiences) { + await adapter.insertPattern({ /* ... 
*/ }); +} + +// Train on batch +await adapter.train({ + epochs: 10, + batchSize: 128, // Larger batch for efficiency +}); +``` + +### Incremental Learning + +```typescript +// Train incrementally as new data arrives +setInterval(async () => { + const newExperiences = getNewExperiences(); + + if (newExperiences.length > 100) { + await adapter.train({ + epochs: 5, + batchSize: 32, + }); + } +}, 60000); // Every minute +``` + +--- + +## Integration with Reasoning Agents + +Combine learning with reasoning for better performance: + +```typescript +// Train learning model +await adapter.train({ epochs: 50, batchSize: 32 }); + +// Use reasoning agents for inference +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'decision-making', + k: 10, + useMMR: true, // Diverse experiences + synthesizeContext: true, // Rich context + optimizeMemory: true, // Consolidate patterns +}); + +// Make decision based on learned experiences + reasoning +const decision = result.context.suggestedAction; +const confidence = result.memories[0].similarity; +``` + +--- + +## CLI Operations + +```bash +# Create plugin +npx agentdb@latest create-plugin -t decision-transformer -n my-plugin + +# List plugins +npx agentdb@latest list-plugins + +# Get plugin info +npx agentdb@latest plugin-info my-plugin + +# List templates +npx agentdb@latest list-templates +``` + +--- + +## Troubleshooting + +### Issue: Training not converging +```typescript +// Reduce learning rate +await adapter.train({ + epochs: 100, + batchSize: 32, + learningRate: 0.0001, // Lower learning rate +}); +``` + +### Issue: Overfitting +```typescript +// Use validation split +await adapter.train({ + epochs: 50, + batchSize: 64, + validationSplit: 0.2, // 20% validation +}); + +// Enable memory optimization +await adapter.retrieveWithReasoning(queryEmbedding, { + optimizeMemory: true, // Consolidate, reduce overfitting +}); +``` + +### Issue: Slow training +```bash +# Enable quantization for faster 
inference +# Use binary quantization (32x faster) +``` + +--- + +## Learn More + +- **Algorithm Papers**: See docs/algorithms/ for detailed papers +- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb +- **MCP Integration**: `npx agentdb@latest mcp` +- **Website**: https://agentdb.ruv.io + +--- + +**Category**: Machine Learning / Reinforcement Learning +**Difficulty**: Intermediate to Advanced +**Estimated Time**: 30-60 minutes diff --git a/.agents/skills/agentdb-memory-patterns/SKILL.md b/.agents/skills/agentdb-memory-patterns/SKILL.md new file mode 100644 index 0000000..6074a7a --- /dev/null +++ b/.agents/skills/agentdb-memory-patterns/SKILL.md @@ -0,0 +1,339 @@ +--- +name: "AgentDB Memory Patterns" +description: "Implement persistent memory patterns for AI agents using AgentDB. Includes session memory, long-term storage, pattern learning, and context management. Use when building stateful agents, chat systems, or intelligent assistants." +--- + +# AgentDB Memory Patterns + +## What This Skill Does + +Provides memory management patterns for AI agents using AgentDB's persistent storage and ReasoningBank integration. Enables agents to remember conversations, learn from interactions, and maintain context across sessions. + +**Performance**: 150x-12,500x faster than traditional solutions with 100% backward compatibility. 
+ +## Prerequisites + +- Node.js 18+ +- AgentDB v1.0.7+ (via agentic-flow or standalone) +- Understanding of agent architectures + +## Quick Start with CLI + +### Initialize AgentDB + +```bash +# Initialize vector database +npx agentdb@latest init ./agents.db + +# Or with custom dimensions +npx agentdb@latest init ./agents.db --dimension 768 + +# Use preset configurations +npx agentdb@latest init ./agents.db --preset large + +# In-memory database for testing +npx agentdb@latest init ./memory.db --in-memory +``` + +### Start MCP Server for Codex + +```bash +# Start MCP server (integrates with Codex) +npx agentdb@latest mcp + +# Add to Codex (one-time setup) +Codex mcp add agentdb npx agentdb@latest mcp +``` + +### Create Learning Plugin + +```bash +# Interactive plugin wizard +npx agentdb@latest create-plugin + +# Use template directly +npx agentdb@latest create-plugin -t decision-transformer -n my-agent + +# Available templates: +# - decision-transformer (sequence modeling RL) +# - q-learning (value-based learning) +# - sarsa (on-policy TD learning) +# - actor-critic (policy gradient) +# - curiosity-driven (exploration-based) +``` + +## Quick Start with API + +```typescript +import { createAgentDBAdapter } from 'agentic-flow/reasoningbank'; + +// Initialize with default configuration +const adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/reasoningbank.db', + enableLearning: true, // Enable learning plugins + enableReasoning: true, // Enable reasoning agents + quantizationType: 'scalar', // binary | scalar | product | none + cacheSize: 1000, // In-memory cache +}); + +// Store interaction memory +const patternId = await adapter.insertPattern({ + id: '', + type: 'pattern', + domain: 'conversation', + pattern_data: JSON.stringify({ + embedding: await computeEmbedding('What is the capital of France?'), + pattern: { + user: 'What is the capital of France?', + assistant: 'The capital of France is Paris.', + timestamp: Date.now() + } + }), + confidence: 0.95, + 
usage_count: 1, + success_count: 1, + created_at: Date.now(), + last_used: Date.now(), +}); + +// Retrieve context with reasoning +const context = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'conversation', + k: 10, + useMMR: true, // Maximal Marginal Relevance + synthesizeContext: true, // Generate rich context +}); +``` + +## Memory Patterns + +### 1. Session Memory +```typescript +class SessionMemory { + async storeMessage(role: string, content: string) { + return await db.storeMemory({ + sessionId: this.sessionId, + role, + content, + timestamp: Date.now() + }); + } + + async getSessionHistory(limit = 20) { + return await db.query({ + filters: { sessionId: this.sessionId }, + orderBy: 'timestamp', + limit + }); + } +} +``` + +### 2. Long-Term Memory +```typescript +// Store important facts +await db.storeFact({ + category: 'user_preference', + key: 'language', + value: 'English', + confidence: 1.0, + source: 'explicit' +}); + +// Retrieve facts +const prefs = await db.getFacts({ + category: 'user_preference' +}); +``` + +### 3. 
Pattern Learning +```typescript +// Learn from successful interactions +await db.storePattern({ + trigger: 'user_asks_time', + response: 'provide_formatted_time', + success: true, + context: { timezone: 'UTC' } +}); + +// Apply learned patterns +const pattern = await db.matchPattern(currentContext); +``` + +## Advanced Patterns + +### Hierarchical Memory +```typescript +// Organize memory in hierarchy +await memory.organize({ + immediate: recentMessages, // Last 10 messages + shortTerm: sessionContext, // Current session + longTerm: importantFacts, // Persistent facts + semantic: embeddedKnowledge // Vector search +}); +``` + +### Memory Consolidation +```typescript +// Periodically consolidate memories +await memory.consolidate({ + strategy: 'importance', // Keep important memories + maxSize: 10000, // Size limit + minScore: 0.5 // Relevance threshold +}); +``` + +## CLI Operations + +### Query Database + +```bash +# Query with vector embedding +npx agentdb@latest query ./agents.db "[0.1,0.2,0.3,...]" + +# Top-k results +npx agentdb@latest query ./agents.db "[0.1,0.2,0.3]" -k 10 + +# With similarity threshold +npx agentdb@latest query ./agents.db "0.1 0.2 0.3" -t 0.75 + +# JSON output +npx agentdb@latest query ./agents.db "[...]" -f json +``` + +### Import/Export Data + +```bash +# Export vectors to file +npx agentdb@latest export ./agents.db ./backup.json + +# Import vectors from file +npx agentdb@latest import ./backup.json + +# Get database statistics +npx agentdb@latest stats ./agents.db +``` + +### Performance Benchmarks + +```bash +# Run performance benchmarks +npx agentdb@latest benchmark + +# Results show: +# - Pattern Search: 150x faster (100µs vs 15ms) +# - Batch Insert: 500x faster (2ms vs 1s) +# - Large-scale Query: 12,500x faster (8ms vs 100s) +``` + +## Integration with ReasoningBank + +```typescript +import { createAgentDBAdapter, migrateToAgentDB } from 'agentic-flow/reasoningbank'; + +// Migrate from legacy ReasoningBank +const result = await 
migrateToAgentDB( + '.swarm/memory.db', // Source (legacy) + '.agentdb/reasoningbank.db' // Destination (AgentDB) +); + +console.log(`✅ Migrated ${result.patternsMigrated} patterns`); + +// Train learning model +const adapter = await createAgentDBAdapter({ + enableLearning: true, +}); + +await adapter.train({ + epochs: 50, + batchSize: 32, +}); + +// Get optimal strategy with reasoning +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'task-planning', + synthesizeContext: true, + optimizeMemory: true, +}); +``` + +## Learning Plugins + +### Available Algorithms (9 Total) + +1. **Decision Transformer** - Sequence modeling RL (recommended) +2. **Q-Learning** - Value-based learning +3. **SARSA** - On-policy TD learning +4. **Actor-Critic** - Policy gradient with baseline +5. **Active Learning** - Query selection +6. **Adversarial Training** - Robustness +7. **Curriculum Learning** - Progressive difficulty +8. **Federated Learning** - Distributed learning +9. **Multi-task Learning** - Transfer learning + +### List and Manage Plugins + +```bash +# List available plugins +npx agentdb@latest list-plugins + +# List plugin templates +npx agentdb@latest list-templates + +# Get plugin info +npx agentdb@latest plugin-info +``` + +## Reasoning Agents (4 Modules) + +1. **PatternMatcher** - Find similar patterns with HNSW indexing +2. **ContextSynthesizer** - Generate rich context from multiple sources +3. **MemoryOptimizer** - Consolidate similar patterns, prune low-quality +4. **ExperienceCurator** - Quality-based experience filtering + +## Best Practices + +1. **Enable quantization**: Use scalar/binary for 4-32x memory reduction +2. **Use caching**: 1000 pattern cache for <1ms retrieval +3. **Batch operations**: 500x faster than individual inserts +4. **Train regularly**: Update learning models with new experiences +5. **Enable reasoning**: Automatic context synthesis and optimization +6. 
**Monitor metrics**: Use `stats` command to track performance + +## Troubleshooting + +### Issue: Memory growing too large +```bash +# Check database size +npx agentdb@latest stats ./agents.db + +# Enable quantization +# Use 'binary' (32x smaller) or 'scalar' (4x smaller) +``` + +### Issue: Slow search performance +```bash +# Enable HNSW indexing and caching +# Results: <100µs search time +``` + +### Issue: Migration from legacy ReasoningBank +```bash +# Automatic migration with validation +npx agentdb@latest migrate --source .swarm/memory.db +``` + +## Performance Characteristics + +- **Vector Search**: <100µs (HNSW indexing) +- **Pattern Retrieval**: <1ms (with cache) +- **Batch Insert**: 2ms for 100 patterns +- **Memory Efficiency**: 4-32x reduction with quantization +- **Backward Compatibility**: 100% compatible with ReasoningBank API + +## Learn More + +- GitHub: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb +- Documentation: node_modules/agentic-flow/docs/AGENTDB_INTEGRATION.md +- MCP Integration: `npx agentdb@latest mcp` for Codex +- Website: https://agentdb.ruv.io diff --git a/.agents/skills/agentdb-optimization/SKILL.md b/.agents/skills/agentdb-optimization/SKILL.md new file mode 100644 index 0000000..f19df86 --- /dev/null +++ b/.agents/skills/agentdb-optimization/SKILL.md @@ -0,0 +1,509 @@ +--- +name: "AgentDB Performance Optimization" +description: "Optimize AgentDB performance with quantization (4-32x memory reduction), HNSW indexing (150x faster search), caching, and batch operations. Use when optimizing memory usage, improving search speed, or scaling to millions of vectors." +--- + +# AgentDB Performance Optimization + +## What This Skill Does + +Provides comprehensive performance optimization techniques for AgentDB vector databases. Achieve 150x-12,500x performance improvements through quantization, HNSW indexing, caching strategies, and batch operations. Reduce memory usage by 4-32x while maintaining accuracy. 
+ +**Performance**: <100µs vector search, <1ms pattern retrieval, 2ms batch insert for 100 vectors. + +## Prerequisites + +- Node.js 18+ +- AgentDB v1.0.7+ (via agentic-flow) +- Existing AgentDB database or application + +--- + +## Quick Start + +### Run Performance Benchmarks + +```bash +# Comprehensive performance benchmarking +npx agentdb@latest benchmark + +# Results show: +# ✅ Pattern Search: 150x faster (100µs vs 15ms) +# ✅ Batch Insert: 500x faster (2ms vs 1s for 100 vectors) +# ✅ Large-scale Query: 12,500x faster (8ms vs 100s at 1M vectors) +# ✅ Memory Efficiency: 4-32x reduction with quantization +``` + +### Enable Optimizations + +```typescript +import { createAgentDBAdapter } from 'agentic-flow/reasoningbank'; + +// Optimized configuration +const adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/optimized.db', + quantizationType: 'binary', // 32x memory reduction + cacheSize: 1000, // In-memory cache + enableLearning: true, + enableReasoning: true, +}); +``` + +--- + +## Quantization Strategies + +### 1. Binary Quantization (32x Reduction) + +**Best For**: Large-scale deployments (1M+ vectors), memory-constrained environments +**Trade-off**: ~2-5% accuracy loss, 32x memory reduction, 10x faster + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'binary', + // 768-dim float32 (3072 bytes) → 96 bytes binary + // 1M vectors: 3GB → 96MB +}); +``` + +**Use Cases**: +- Mobile/edge deployment +- Large-scale vector storage (millions of vectors) +- Real-time search with memory constraints + +**Performance**: +- Memory: 32x smaller +- Search Speed: 10x faster (bit operations) +- Accuracy: 95-98% of original + +### 2. 
Scalar Quantization (4x Reduction) + +**Best For**: Balanced performance/accuracy, moderate datasets +**Trade-off**: ~1-2% accuracy loss, 4x memory reduction, 3x faster + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'scalar', + // 768-dim float32 (3072 bytes) → 768 bytes (uint8) + // 1M vectors: 3GB → 768MB +}); +``` + +**Use Cases**: +- Production applications requiring high accuracy +- Medium-scale deployments (10K-1M vectors) +- General-purpose optimization + +**Performance**: +- Memory: 4x smaller +- Search Speed: 3x faster +- Accuracy: 98-99% of original + +### 3. Product Quantization (8-16x Reduction) + +**Best For**: High-dimensional vectors, balanced compression +**Trade-off**: ~3-7% accuracy loss, 8-16x memory reduction, 5x faster + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'product', + // 768-dim float32 (3072 bytes) → 48-96 bytes + // 1M vectors: 3GB → 192MB +}); +``` + +**Use Cases**: +- High-dimensional embeddings (>512 dims) +- Image/video embeddings +- Large-scale similarity search + +**Performance**: +- Memory: 8-16x smaller +- Search Speed: 5x faster +- Accuracy: 93-97% of original + +### 4. 
No Quantization (Full Precision) + +**Best For**: Maximum accuracy, small datasets +**Trade-off**: No accuracy loss, full memory usage + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'none', + // Full float32 precision +}); +``` + +--- + +## HNSW Indexing + +**Hierarchical Navigable Small World** - O(log n) search complexity + +### Automatic HNSW + +AgentDB automatically builds HNSW indices: + +```typescript +const adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/vectors.db', + // HNSW automatically enabled +}); + +// Search with HNSW (100µs vs 15ms linear scan) +const results = await adapter.retrieveWithReasoning(queryEmbedding, { + k: 10, +}); +``` + +### HNSW Parameters + +```typescript +// Advanced HNSW configuration +const adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/vectors.db', + hnswM: 16, // Connections per layer (default: 16) + hnswEfConstruction: 200, // Build quality (default: 200) + hnswEfSearch: 100, // Search quality (default: 100) +}); +``` + +**Parameter Tuning**: +- **M** (connections): Higher = better recall, more memory + - Small datasets (<10K): M = 8 + - Medium datasets (10K-100K): M = 16 + - Large datasets (>100K): M = 32 +- **efConstruction**: Higher = better index quality, slower build + - Fast build: 100 + - Balanced: 200 (default) + - High quality: 400 +- **efSearch**: Higher = better recall, slower search + - Fast search: 50 + - Balanced: 100 (default) + - High recall: 200 + +--- + +## Caching Strategies + +### In-Memory Pattern Cache + +```typescript +const adapter = await createAgentDBAdapter({ + cacheSize: 1000, // Cache 1000 most-used patterns +}); + +// First retrieval: ~2ms (database) +// Subsequent: <1ms (cache hit) +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + k: 10, +}); +``` + +**Cache Tuning**: +- Small applications: 100-500 patterns +- Medium applications: 500-2000 patterns +- Large applications: 2000-5000 patterns + +### LRU Cache Behavior + 
+```typescript +// Cache automatically evicts least-recently-used patterns +// Most frequently accessed patterns stay in cache + +// Monitor cache performance +const stats = await adapter.getStats(); +console.log('Cache Hit Rate:', stats.cacheHitRate); +// Aim for >80% hit rate +``` + +--- + +## Batch Operations + +### Batch Insert (500x Faster) + +```typescript +// ❌ SLOW: Individual inserts +for (const doc of documents) { + await adapter.insertPattern({ /* ... */ }); // 1s for 100 docs +} + +// ✅ FAST: Batch insert +const patterns = documents.map(doc => ({ + id: '', + type: 'document', + domain: 'knowledge', + pattern_data: JSON.stringify({ + embedding: doc.embedding, + text: doc.text, + }), + confidence: 1.0, + usage_count: 0, + success_count: 0, + created_at: Date.now(), + last_used: Date.now(), +})); + +// Insert all at once (2ms for 100 docs) +for (const pattern of patterns) { + await adapter.insertPattern(pattern); +} +``` + +### Batch Retrieval + +```typescript +// Retrieve multiple queries efficiently +const queries = [queryEmbedding1, queryEmbedding2, queryEmbedding3]; + +// Parallel retrieval +const results = await Promise.all( + queries.map(q => adapter.retrieveWithReasoning(q, { k: 5 })) +); +``` + +--- + +## Memory Optimization + +### Automatic Consolidation + +```typescript +// Enable automatic pattern consolidation +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'documents', + optimizeMemory: true, // Consolidate similar patterns + k: 10, +}); + +console.log('Optimizations:', result.optimizations); +// { +// consolidated: 15, // Merged 15 similar patterns +// pruned: 3, // Removed 3 low-quality patterns +// improved_quality: 0.12 // 12% quality improvement +// } +``` + +### Manual Optimization + +```typescript +// Manually trigger optimization +await adapter.optimize(); + +// Get statistics +const stats = await adapter.getStats(); +console.log('Before:', stats.totalPatterns); +console.log('After:', 
stats.totalPatterns); // NOTE: capture getStats() before optimize() as well to compare; pattern count typically drops ~10-30%
connections + hnswEfSearch: 200, // High search quality +}); + +// Expected: <200µs search, 100% accuracy +``` + +### Recipe 4: Memory-Constrained (Mobile/Edge) + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'binary', // 32x memory reduction + cacheSize: 100, // Small cache + hnswM: 8, // Minimal connections +}); + +// Expected: <100µs search, ~10MB for 100K vectors +``` + +--- + +## Scaling Strategies + +### Small Scale (<10K vectors) + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'none', // Full precision + cacheSize: 500, + hnswM: 8, +}); +``` + +### Medium Scale (10K-100K vectors) + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'scalar', // 4x reduction + cacheSize: 1000, + hnswM: 16, +}); +``` + +### Large Scale (100K-1M vectors) + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'binary', // 32x reduction + cacheSize: 2000, + hnswM: 32, +}); +``` + +### Massive Scale (>1M vectors) + +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'product', // 8-16x reduction + cacheSize: 5000, + hnswM: 48, + hnswEfConstruction: 400, +}); +``` + +--- + +## Troubleshooting + +### Issue: High memory usage + +```bash +# Check database size +npx agentdb@latest stats .agentdb/vectors.db + +# Enable quantization +# Use 'binary' for 32x reduction +``` + +### Issue: Slow search performance + +```typescript +// Increase cache size +const adapter = await createAgentDBAdapter({ + cacheSize: 2000, // Increase from 1000 +}); + +// Reduce search quality (faster) +const result = await adapter.retrieveWithReasoning(queryEmbedding, { + k: 5, // Reduce from 10 +}); +``` + +### Issue: Low accuracy + +```typescript +// Disable or use lighter quantization +const adapter = await createAgentDBAdapter({ + quantizationType: 'scalar', // Instead of 'binary' + hnswEfSearch: 200, // Higher search quality +}); +``` + +--- + +## 
Performance Benchmarks + +**Test System**: AMD Ryzen 9 5950X, 64GB RAM + +| Operation | Vector Count | No Optimization | Optimized | Improvement | +|-----------|-------------|-----------------|-----------|-------------| +| Search | 10K | 15ms | 100µs | 150x | +| Search | 100K | 150ms | 120µs | 1,250x | +| Search | 1M | 100s | 8ms | 12,500x | +| Batch Insert (100) | - | 1s | 2ms | 500x | +| Memory Usage | 1M | 3GB | 96MB | 32x (binary) | + +--- + +## Learn More + +- **Quantization Paper**: docs/quantization-techniques.pdf +- **HNSW Algorithm**: docs/hnsw-index.pdf +- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb +- **Website**: https://agentdb.ruv.io + +--- + +**Category**: Performance / Optimization +**Difficulty**: Intermediate +**Estimated Time**: 20-30 minutes diff --git a/.agents/skills/agentdb-vector-search/SKILL.md b/.agents/skills/agentdb-vector-search/SKILL.md new file mode 100644 index 0000000..31ebc72 --- /dev/null +++ b/.agents/skills/agentdb-vector-search/SKILL.md @@ -0,0 +1,339 @@ +--- +name: "AgentDB Vector Search" +description: "Implement semantic vector search with AgentDB for intelligent document retrieval, similarity matching, and context-aware querying. Use when building RAG systems, semantic search engines, or intelligent knowledge bases." +--- + +# AgentDB Vector Search + +## What This Skill Does + +Implements vector-based semantic search using AgentDB's high-performance vector database with **150x-12,500x faster** operations than traditional solutions. Features HNSW indexing, quantization, and sub-millisecond search (<100µs). 
+ +## Prerequisites + +- Node.js 18+ +- AgentDB v1.0.7+ (via agentic-flow or standalone) +- OpenAI API key (for embeddings) or custom embedding model + +## Quick Start with CLI + +### Initialize Vector Database + +```bash +# Initialize with default dimensions (1536 for OpenAI ada-002) +npx agentdb@latest init ./vectors.db + +# Custom dimensions for different embedding models +npx agentdb@latest init ./vectors.db --dimension 768 # sentence-transformers +npx agentdb@latest init ./vectors.db --dimension 384 # all-MiniLM-L6-v2 + +# Use preset configurations +npx agentdb@latest init ./vectors.db --preset small # <10K vectors +npx agentdb@latest init ./vectors.db --preset medium # 10K-100K vectors +npx agentdb@latest init ./vectors.db --preset large # >100K vectors + +# In-memory database for testing +npx agentdb@latest init ./vectors.db --in-memory +``` + +### Query Vector Database + +```bash +# Basic similarity search +npx agentdb@latest query ./vectors.db "[0.1,0.2,0.3,...]" + +# Top-k results +npx agentdb@latest query ./vectors.db "[0.1,0.2,0.3]" -k 10 + +# With similarity threshold (cosine similarity) +npx agentdb@latest query ./vectors.db "0.1 0.2 0.3" -t 0.75 -m cosine + +# Different distance metrics +npx agentdb@latest query ./vectors.db "[...]" -m euclidean # L2 distance +npx agentdb@latest query ./vectors.db "[...]" -m dot # Dot product + +# JSON output for automation +npx agentdb@latest query ./vectors.db "[...]" -f json -k 5 + +# Verbose output with distances +npx agentdb@latest query ./vectors.db "[...]" -v +``` + +### Import/Export Vectors + +```bash +# Export vectors to JSON +npx agentdb@latest export ./vectors.db ./backup.json + +# Import vectors from JSON +npx agentdb@latest import ./backup.json + +# Get database statistics +npx agentdb@latest stats ./vectors.db +``` + +## Quick Start with API + +```typescript +import { createAgentDBAdapter, computeEmbedding } from 'agentic-flow/reasoningbank'; + +// Initialize with vector search optimizations +const 
adapter = await createAgentDBAdapter({ + dbPath: '.agentdb/vectors.db', + enableLearning: false, // Vector search only + enableReasoning: true, // Enable semantic matching + quantizationType: 'binary', // 32x memory reduction + cacheSize: 1000, // Fast retrieval +}); + +// Store document with embedding +const text = "The quantum computer achieved 100 qubits"; +const embedding = await computeEmbedding(text); + +await adapter.insertPattern({ + id: '', + type: 'document', + domain: 'technology', + pattern_data: JSON.stringify({ + embedding, + text, + metadata: { category: "quantum", date: "2025-01-15" } + }), + confidence: 1.0, + usage_count: 0, + success_count: 0, + created_at: Date.now(), + last_used: Date.now(), +}); + +// Semantic search with MMR (Maximal Marginal Relevance) +const queryEmbedding = await computeEmbedding("quantum computing advances"); +const results = await adapter.retrieveWithReasoning(queryEmbedding, { + domain: 'technology', + k: 10, + useMMR: true, // Diverse results + synthesizeContext: true, // Rich context +}); +``` + +## Core Features + +### 1. Vector Storage +```typescript +// Store with automatic embedding +await db.storeWithEmbedding({ + content: "Your document text", + metadata: { source: "docs", page: 42 } +}); +``` + +### 2. Similarity Search +```typescript +// Find similar documents +const similar = await db.findSimilar("quantum computing", { + limit: 5, + minScore: 0.75 +}); +``` + +### 3. Hybrid Search (Vector + Metadata) +```typescript +// Combine vector similarity with metadata filtering +const results = await db.hybridSearch({ + query: "machine learning models", + filters: { + category: "research", + date: { $gte: "2024-01-01" } + }, + limit: 20 +}); +``` + +## Advanced Usage + +### RAG (Retrieval Augmented Generation) +```typescript +// Build RAG pipeline +async function ragQuery(question: string) { + // 1. 
Get relevant context + const context = await db.searchSimilar( + await embed(question), + { limit: 5, threshold: 0.7 } + ); + + // 2. Generate answer with context + const prompt = `Context: ${context.map(c => c.text).join('\n')} +Question: ${question}`; + + return await llm.generate(prompt); +} +``` + +### Batch Operations +```typescript +// Efficient batch storage +await db.batchStore(documents.map(doc => ({ + text: doc.content, + embedding: doc.vector, + metadata: doc.meta +}))); +``` + +## MCP Server Integration + +```bash +# Start AgentDB MCP server for Codex +npx agentdb@latest mcp + +# Add to Codex (one-time setup) +Codex mcp add agentdb npx agentdb@latest mcp + +# Now use MCP tools in Codex: +# - agentdb_query: Semantic vector search +# - agentdb_store: Store documents with embeddings +# - agentdb_stats: Database statistics +``` + +## Performance Benchmarks + +```bash +# Run comprehensive benchmarks +npx agentdb@latest benchmark + +# Results: +# ✅ Pattern Search: 150x faster (100µs vs 15ms) +# ✅ Batch Insert: 500x faster (2ms vs 1s for 100 vectors) +# ✅ Large-scale Query: 12,500x faster (8ms vs 100s at 1M vectors) +# ✅ Memory Efficiency: 4-32x reduction with quantization +``` + +## Quantization Options + +AgentDB provides multiple quantization strategies for memory efficiency: + +### Binary Quantization (32x reduction) +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'binary', // 768-dim → 96 bytes +}); +``` + +### Scalar Quantization (4x reduction) +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'scalar', // 768-dim → 768 bytes +}); +``` + +### Product Quantization (8-16x reduction) +```typescript +const adapter = await createAgentDBAdapter({ + quantizationType: 'product', // 768-dim → 48-96 bytes +}); +``` + +## Distance Metrics + +```bash +# Cosine similarity (default, best for most use cases) +npx agentdb@latest query ./db.sqlite "[...]" -m cosine + +# Euclidean distance (L2 norm) +npx 
agentdb@latest query ./db.sqlite "[...]" -m euclidean + +# Dot product (for normalized vectors) +npx agentdb@latest query ./db.sqlite "[...]" -m dot +``` + +## Advanced Features + +### HNSW Indexing +- **O(log n) search complexity** +- **Sub-millisecond retrieval** (<100µs) +- **Automatic index building** + +### Caching +- **1000 pattern in-memory cache** +- **<1ms pattern retrieval** +- **Automatic cache invalidation** + +### MMR (Maximal Marginal Relevance) +- **Diverse result sets** +- **Avoid redundancy** +- **Balance relevance and diversity** + +## Performance Tips + +1. **Enable HNSW indexing**: Automatic with AgentDB, 10-100x faster +2. **Use quantization**: Binary (32x), Scalar (4x), Product (8-16x) memory reduction +3. **Batch operations**: 500x faster for bulk inserts +4. **Match dimensions**: 1536 (OpenAI), 768 (sentence-transformers), 384 (MiniLM) +5. **Similarity threshold**: Start at 0.7 for quality, adjust based on use case +6. **Enable caching**: 1000 pattern cache for frequent queries + +## Troubleshooting + +### Issue: Slow search performance +```bash +# Check if HNSW indexing is enabled (automatic) +npx agentdb@latest stats ./vectors.db + +# Expected: <100µs search time +``` + +### Issue: High memory usage +```bash +# Enable binary quantization (32x reduction) +# Use in adapter: quantizationType: 'binary' +``` + +### Issue: Poor relevance +```bash +# Adjust similarity threshold +npx agentdb@latest query ./db.sqlite "[...]" -t 0.8 # Higher threshold + +# Or use MMR for diverse results +# Use in adapter: useMMR: true +``` + +### Issue: Wrong dimensions +```bash +# Check embedding model dimensions: +# - OpenAI ada-002: 1536 +# - sentence-transformers: 768 +# - all-MiniLM-L6-v2: 384 + +npx agentdb@latest init ./db.sqlite --dimension 768 +``` + +## Database Statistics + +```bash +# Get comprehensive stats +npx agentdb@latest stats ./vectors.db + +# Shows: +# - Total patterns/vectors +# - Database size +# - Average confidence +# - Domains distribution 
+# - Index status +``` + +## Performance Characteristics + +- **Vector Search**: <100µs (HNSW indexing) +- **Pattern Retrieval**: <1ms (with cache) +- **Batch Insert**: 2ms for 100 vectors +- **Memory Efficiency**: 4-32x reduction with quantization +- **Scalability**: Handles 1M+ vectors efficiently +- **Latency**: Sub-millisecond for most operations + +## Learn More + +- GitHub: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb +- Documentation: node_modules/agentic-flow/docs/AGENTDB_INTEGRATION.md +- MCP Integration: `npx agentdb@latest mcp` for Codex +- Website: https://agentdb.ruv.io +- CLI Help: `npx agentdb@latest --help` +- Command Help: `npx agentdb@latest help ` diff --git a/.agents/skills/browser/SKILL.md b/.agents/skills/browser/SKILL.md new file mode 100644 index 0000000..085b3c7 --- /dev/null +++ b/.agents/skills/browser/SKILL.md @@ -0,0 +1,204 @@ +--- +name: browser +description: Web browser automation with AI-optimized snapshots for Codex-flow agents +version: 1.0.0 +triggers: + - /browser + - browse + - web automation + - scrape + - navigate + - screenshot +tools: + - browser/open + - browser/snapshot + - browser/click + - browser/fill + - browser/screenshot + - browser/close +--- + +# Browser Automation Skill + +Web browser automation using agent-browser with AI-optimized snapshots. Reduces context by 93% using element refs (@e1, @e2) instead of full DOM. + +## Core Workflow + +```bash +# 1. Navigate to page +agent-browser open + +# 2. Get accessibility tree with element refs +agent-browser snapshot -i # -i = interactive elements only + +# 3. Interact using refs from snapshot +agent-browser click @e2 +agent-browser fill @e3 "text" + +# 4. 
Re-snapshot after page changes +agent-browser snapshot -i +``` + +## Quick Reference + +### Navigation +| Command | Description | +|---------|-------------| +| `open ` | Navigate to URL | +| `back` | Go back | +| `forward` | Go forward | +| `reload` | Reload page | +| `close` | Close browser | + +### Snapshots (AI-Optimized) +| Command | Description | +|---------|-------------| +| `snapshot` | Full accessibility tree | +| `snapshot -i` | Interactive elements only (buttons, links, inputs) | +| `snapshot -c` | Compact (remove empty elements) | +| `snapshot -d 3` | Limit depth to 3 levels | +| `screenshot [path]` | Capture screenshot (base64 if no path) | + +### Interaction +| Command | Description | +|---------|-------------| +| `click ` | Click element | +| `fill ` | Clear and fill input | +| `type ` | Type with key events | +| `press ` | Press key (Enter, Tab, etc.) | +| `hover ` | Hover element | +| `select ` | Select dropdown option | +| `check/uncheck ` | Toggle checkbox | +| `scroll [px]` | Scroll page | + +### Get Info +| Command | Description | +|---------|-------------| +| `get text ` | Get text content | +| `get html ` | Get innerHTML | +| `get value ` | Get input value | +| `get attr ` | Get attribute | +| `get title` | Get page title | +| `get url` | Get current URL | + +### Wait +| Command | Description | +|---------|-------------| +| `wait ` | Wait for element | +| `wait ` | Wait milliseconds | +| `wait --text "text"` | Wait for text | +| `wait --url "pattern"` | Wait for URL | +| `wait --load networkidle` | Wait for load state | + +### Sessions +| Command | Description | +|---------|-------------| +| `--session ` | Use isolated session | +| `session list` | List active sessions | + +## Selectors + +### Element Refs (Recommended) +```bash +# Get refs from snapshot +agent-browser snapshot -i +# Output: button "Submit" [ref=e2] + +# Use ref to interact +agent-browser click @e2 +``` + +### CSS Selectors +```bash +agent-browser click "#submit" 
+agent-browser fill ".email-input" "test@test.com" +``` + +### Semantic Locators +```bash +agent-browser find role button click --name "Submit" +agent-browser find label "Email" fill "test@test.com" +agent-browser find testid "login-btn" click +``` + +## Examples + +### Login Flow +```bash +agent-browser open https://example.com/login +agent-browser snapshot -i +agent-browser fill @e2 "user@example.com" +agent-browser fill @e3 "password123" +agent-browser click @e4 +agent-browser wait --url "**/dashboard" +``` + +### Form Submission +```bash +agent-browser open https://example.com/contact +agent-browser snapshot -i +agent-browser fill @e1 "John Doe" +agent-browser fill @e2 "john@example.com" +agent-browser fill @e3 "Hello, this is my message" +agent-browser click @e4 +agent-browser wait --text "Thank you" +``` + +### Data Extraction +```bash +agent-browser open https://example.com/products +agent-browser snapshot -i +# Iterate through product refs +agent-browser get text @e1 # Product name +agent-browser get text @e2 # Price +agent-browser get attr @e3 href # Link +``` + +### Multi-Session (Swarm) +```bash +# Session 1: Navigator +agent-browser --session nav open https://example.com +agent-browser --session nav state save auth.json + +# Session 2: Scraper (uses same auth) +agent-browser --session scrape state load auth.json +agent-browser --session scrape open https://example.com/data +agent-browser --session scrape snapshot -i +``` + +## Integration with Codex Flow + +### MCP Tools +All browser operations are available as MCP tools with `browser/` prefix: +- `browser/open` +- `browser/snapshot` +- `browser/click` +- `browser/fill` +- `browser/screenshot` +- etc. 
+ +### Memory Integration +```bash +# Store successful patterns +npx @Codex-flow/cli memory store --namespace browser-patterns --key "login-flow" --value "snapshot->fill->click->wait" + +# Retrieve before similar task +npx @Codex-flow/cli memory search --query "login automation" +``` + +### Hooks +```bash +# Pre-browse hook (get context) +npx @Codex-flow/cli hooks pre-edit --file "browser-task.ts" + +# Post-browse hook (record success) +npx @Codex-flow/cli hooks post-task --task-id "browse-1" --success true +``` + +## Tips + +1. **Always use snapshots** - They're optimized for AI with refs +2. **Prefer `-i` flag** - Gets only interactive elements, smaller output +3. **Use refs, not selectors** - More reliable, deterministic +4. **Re-snapshot after navigation** - Page state changes +5. **Use sessions for parallel work** - Each session is isolated diff --git a/.agents/skills/github-code-review/SKILL.md b/.agents/skills/github-code-review/SKILL.md new file mode 100644 index 0000000..b12d74e --- /dev/null +++ b/.agents/skills/github-code-review/SKILL.md @@ -0,0 +1,1140 @@ +--- +name: github-code-review +version: 1.0.0 +description: Comprehensive GitHub code review with AI-powered swarm coordination +category: github +tags: [code-review, github, swarm, pr-management, automation] +author: Codex Flow +requires: + - github-cli + - ruv-swarm + - Codex-flow +capabilities: + - Multi-agent code review + - Automated PR management + - Security and performance analysis + - Swarm-based review orchestration + - Intelligent comment generation + - Quality gate enforcement +--- + +# GitHub Code Review Skill + +> **AI-Powered Code Review**: Deploy specialized review agents to perform comprehensive, intelligent code reviews that go beyond traditional static analysis. 
+ +## 🎯 Quick Start + +### Simple Review +```bash +# Initialize review swarm for PR +gh pr view 123 --json files,diff | npx ruv-swarm github review-init --pr 123 + +# Post review status +gh pr comment 123 --body "🔍 Multi-agent code review initiated" +``` + +### Complete Review Workflow +```bash +# Get PR context with gh CLI +PR_DATA=$(gh pr view 123 --json files,additions,deletions,title,body) +PR_DIFF=$(gh pr diff 123) + +# Initialize comprehensive review +npx ruv-swarm github review-init \ + --pr 123 \ + --pr-data "$PR_DATA" \ + --diff "$PR_DIFF" \ + --agents "security,performance,style,architecture,accessibility" \ + --depth comprehensive +``` + +--- + +## 📚 Table of Contents + +
+Core Features + +- [Multi-Agent Review System](#multi-agent-review-system) +- [Specialized Review Agents](#specialized-review-agents) +- [PR-Based Swarm Management](#pr-based-swarm-management) +- [Automated Workflows](#automated-workflows) +- [Quality Gates & Checks](#quality-gates--checks) + +
+ +
+Review Agents + +- [Security Review Agent](#security-review-agent) +- [Performance Review Agent](#performance-review-agent) +- [Architecture Review Agent](#architecture-review-agent) +- [Style & Convention Agent](#style--convention-agent) +- [Accessibility Agent](#accessibility-agent) + +
+ +
+Advanced Features + +- [Context-Aware Reviews](#context-aware-reviews) +- [Learning from History](#learning-from-history) +- [Cross-PR Analysis](#cross-pr-analysis) +- [Custom Review Agents](#custom-review-agents) + +
+ +
+Integration & Automation + +- [CI/CD Integration](#cicd-integration) +- [Webhook Handlers](#webhook-handlers) +- [PR Comment Commands](#pr-comment-commands) +- [Automated Fixes](#automated-fixes) + +
+ +--- + +## 🚀 Core Features + +### Multi-Agent Review System + +Deploy specialized AI agents for comprehensive code review: + +```bash +# Initialize review swarm with GitHub CLI integration +PR_DATA=$(gh pr view 123 --json files,additions,deletions,title,body) +PR_DIFF=$(gh pr diff 123) + +# Start multi-agent review +npx ruv-swarm github review-init \ + --pr 123 \ + --pr-data "$PR_DATA" \ + --diff "$PR_DIFF" \ + --agents "security,performance,style,architecture,accessibility" \ + --depth comprehensive + +# Post initial review status +gh pr comment 123 --body "🔍 Multi-agent code review initiated" +``` + +**Benefits:** +- ✅ Parallel review by specialized agents +- ✅ Comprehensive coverage across multiple domains +- ✅ Faster review cycles with coordinated analysis +- ✅ Consistent quality standards enforcement + +--- + +## 🤖 Specialized Review Agents + +### Security Review Agent + +**Focus:** Identify security vulnerabilities and suggest fixes + +```bash +# Get changed files from PR +CHANGED_FILES=$(gh pr view 123 --json files --jq '.files[].path') + +# Run security-focused review +SECURITY_RESULTS=$(npx ruv-swarm github review-security \ + --pr 123 \ + --files "$CHANGED_FILES" \ + --check "owasp,cve,secrets,permissions" \ + --suggest-fixes) + +# Post findings based on severity +if echo "$SECURITY_RESULTS" | grep -q "critical"; then + # Request changes for critical issues + gh pr review 123 --request-changes --body "$SECURITY_RESULTS" + gh pr edit 123 --add-label "security-review-required" +else + # Post as comment for non-critical issues + gh pr comment 123 --body "$SECURITY_RESULTS" +fi +``` + +
+Security Checks Performed + +```javascript +{ + "checks": [ + "SQL injection vulnerabilities", + "XSS attack vectors", + "Authentication bypasses", + "Authorization flaws", + "Cryptographic weaknesses", + "Dependency vulnerabilities", + "Secret exposure", + "CORS misconfigurations" + ], + "actions": [ + "Block PR on critical issues", + "Suggest secure alternatives", + "Add security test cases", + "Update security documentation" + ] +} +``` + +
+ +
+<details>
+<summary>Comment Template: Security Issue</summary>
+
+````markdown
+🔒 **Security Issue: [Type]**
+
+**Severity**: 🔴 Critical / 🟡 High / 🟢 Low
+
+**Description**:
+[Clear explanation of the security issue]
+
+**Impact**:
+[Potential consequences if not addressed]
+
+**Suggested Fix**:
+```language
+[Code example of the fix]
+```
+
+**References**:
+- [OWASP Guide](link)
+- [Security Best Practices](link)
+````
+
+</details>
+ +--- + +### Performance Review Agent + +**Focus:** Analyze performance impact and optimization opportunities + +```bash +# Run performance analysis +npx ruv-swarm github review-performance \ + --pr 123 \ + --profile "cpu,memory,io" \ + --benchmark-against main \ + --suggest-optimizations +``` + +
+Performance Metrics Analyzed + +```javascript +{ + "metrics": [ + "Algorithm complexity (Big O analysis)", + "Database query efficiency", + "Memory allocation patterns", + "Cache utilization", + "Network request optimization", + "Bundle size impact", + "Render performance" + ], + "benchmarks": [ + "Compare with baseline", + "Load test simulations", + "Memory leak detection", + "Bottleneck identification" + ] +} +``` + +
+ +--- + +### Architecture Review Agent + +**Focus:** Evaluate design patterns and architectural decisions + +```bash +# Architecture review +npx ruv-swarm github review-architecture \ + --pr 123 \ + --check "patterns,coupling,cohesion,solid" \ + --visualize-impact \ + --suggest-refactoring +``` + +
+Architecture Analysis + +```javascript +{ + "patterns": [ + "Design pattern adherence", + "SOLID principles", + "DRY violations", + "Separation of concerns", + "Dependency injection", + "Layer violations", + "Circular dependencies" + ], + "metrics": [ + "Coupling metrics", + "Cohesion scores", + "Complexity measures", + "Maintainability index" + ] +} +``` + +
+ +--- + +### Style & Convention Agent + +**Focus:** Enforce coding standards and best practices + +```bash +# Style enforcement with auto-fix +npx ruv-swarm github review-style \ + --pr 123 \ + --check "formatting,naming,docs,tests" \ + --auto-fix "formatting,imports,whitespace" +``` + +
+Style Checks + +```javascript +{ + "checks": [ + "Code formatting", + "Naming conventions", + "Documentation standards", + "Comment quality", + "Test coverage", + "Error handling patterns", + "Logging standards" + ], + "auto-fix": [ + "Formatting issues", + "Import organization", + "Trailing whitespace", + "Simple naming issues" + ] +} +``` + +
+ +--- + +## 🔄 PR-Based Swarm Management + +### Create Swarm from PR + +```bash +# Create swarm from PR description using gh CLI +gh pr view 123 --json body,title,labels,files | npx ruv-swarm swarm create-from-pr + +# Auto-spawn agents based on PR labels +gh pr view 123 --json labels | npx ruv-swarm swarm auto-spawn + +# Create swarm with full PR context +gh pr view 123 --json body,labels,author,assignees | \ + npx ruv-swarm swarm init --from-pr-data +``` + +### Label-Based Agent Assignment + +Map PR labels to specialized agents: + +```json +{ + "label-mapping": { + "bug": ["debugger", "tester"], + "feature": ["architect", "coder", "tester"], + "refactor": ["analyst", "coder"], + "docs": ["researcher", "writer"], + "performance": ["analyst", "optimizer"], + "security": ["security", "authentication", "audit"] + } +} +``` + +### Topology Selection by PR Size + +```bash +# Automatic topology selection based on PR complexity +# Small PR (< 100 lines): ring topology +# Medium PR (100-500 lines): mesh topology +# Large PR (> 500 lines): hierarchical topology +npx ruv-swarm github pr-topology --pr 123 +``` + +--- + +## 🎬 PR Comment Commands + +Execute swarm commands directly from PR comments: + +```markdown + +/swarm init mesh 6 +/swarm spawn coder "Implement authentication" +/swarm spawn tester "Write unit tests" +/swarm status +/swarm review --agents security,performance +``` + +
+<details>
+<summary>Webhook Handler for Comment Commands</summary>
+
+```javascript
+// webhook-handler.js
+const { createServer } = require('http');
+const { execSync } = require('child_process');
+
+createServer((req, res) => {
+  if (req.url === '/github-webhook') {
+    // Collect the request body from the stream before parsing --
+    // http.IncomingMessage has no `body` property.
+    let body = '';
+    req.on('data', (chunk) => { body += chunk; });
+    req.on('end', () => {
+      const event = JSON.parse(body);
+
+      if (event.action === 'opened' && event.pull_request) {
+        execSync(`npx ruv-swarm github pr-init ${event.pull_request.number}`);
+      }
+
+      if (event.comment && event.comment.body.startsWith('/swarm')) {
+        // NOTE: validate the comment against an allow-list before shelling out --
+        // interpolating untrusted comment text into execSync is a command-injection risk.
+        const command = event.comment.body;
+        execSync(`npx ruv-swarm github handle-comment --pr ${event.issue.number} --command "${command}"`);
+      }
+
+      res.writeHead(200);
+      res.end('OK');
+    });
+  }
+}).listen(3000);
+```
+
+</details>
+ +--- + +## ⚙️ Review Configuration + +### Configuration File + +```yaml +# .github/review-swarm.yml +version: 1 +review: + auto-trigger: true + required-agents: + - security + - performance + - style + optional-agents: + - architecture + - accessibility + - i18n + + thresholds: + security: block # Block merge on security issues + performance: warn # Warn on performance issues + style: suggest # Suggest style improvements + + rules: + security: + - no-eval + - no-hardcoded-secrets + - proper-auth-checks + - validate-input + performance: + - no-n-plus-one + - efficient-queries + - proper-caching + - optimize-loops + architecture: + - max-coupling: 5 + - min-cohesion: 0.7 + - follow-patterns + - avoid-circular-deps +``` + +### Custom Review Triggers + +```javascript +{ + "triggers": { + "high-risk-files": { + "paths": ["**/auth/**", "**/payment/**", "**/admin/**"], + "agents": ["security", "architecture"], + "depth": "comprehensive", + "require-approval": true + }, + "performance-critical": { + "paths": ["**/api/**", "**/database/**", "**/cache/**"], + "agents": ["performance", "database"], + "benchmarks": true, + "regression-threshold": "5%" + }, + "ui-changes": { + "paths": ["**/components/**", "**/styles/**", "**/pages/**"], + "agents": ["accessibility", "style", "i18n"], + "visual-tests": true, + "responsive-check": true + } + } +} +``` + +--- + +## 🤖 Automated Workflows + +### Auto-Review on PR Creation + +```yaml +# .github/workflows/auto-review.yml +name: Automated Code Review +on: + pull_request: + types: [opened, synchronize] + issue_comment: + types: [created] + +jobs: + swarm-review: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup GitHub CLI + run: echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token + + - name: Run Review Swarm + run: | + # Get PR context with gh CLI + PR_NUM=${{ github.event.pull_request.number }} + PR_DATA=$(gh pr view $PR_NUM --json files,title,body,labels) + 
PR_DIFF=$(gh pr diff $PR_NUM) + + # Run swarm review + REVIEW_OUTPUT=$(npx ruv-swarm github review-all \ + --pr $PR_NUM \ + --pr-data "$PR_DATA" \ + --diff "$PR_DIFF" \ + --agents "security,performance,style,architecture") + + # Post review results + echo "$REVIEW_OUTPUT" | gh pr review $PR_NUM --comment -F - + + # Update PR status + if echo "$REVIEW_OUTPUT" | grep -q "approved"; then + gh pr review $PR_NUM --approve + elif echo "$REVIEW_OUTPUT" | grep -q "changes-requested"; then + gh pr review $PR_NUM --request-changes -b "See review comments above" + fi + + - name: Update Labels + run: | + # Add labels based on review results + if echo "$REVIEW_OUTPUT" | grep -q "security"; then + gh pr edit $PR_NUM --add-label "security-review" + fi + if echo "$REVIEW_OUTPUT" | grep -q "performance"; then + gh pr edit $PR_NUM --add-label "performance-review" + fi +``` + +--- + +## 💬 Intelligent Comment Generation + +### Generate Contextual Review Comments + +```bash +# Get PR diff with context +PR_DIFF=$(gh pr diff 123 --color never) +PR_FILES=$(gh pr view 123 --json files) + +# Generate review comments +COMMENTS=$(npx ruv-swarm github review-comment \ + --pr 123 \ + --diff "$PR_DIFF" \ + --files "$PR_FILES" \ + --style "constructive" \ + --include-examples \ + --suggest-fixes) + +# Post comments using gh CLI +echo "$COMMENTS" | jq -c '.[]' | while read -r comment; do + FILE=$(echo "$comment" | jq -r '.path') + LINE=$(echo "$comment" | jq -r '.line') + BODY=$(echo "$comment" | jq -r '.body') + COMMIT_ID=$(gh pr view 123 --json headRefOid -q .headRefOid) + + # Create inline review comments + gh api \ + --method POST \ + /repos/:owner/:repo/pulls/123/comments \ + -f path="$FILE" \ + -f line="$LINE" \ + -f body="$BODY" \ + -f commit_id="$COMMIT_ID" +done +``` + +### Batch Comment Management + +```bash +# Manage review comments efficiently +npx ruv-swarm github review-comments \ + --pr 123 \ + --group-by "agent,severity" \ + --summarize \ + --resolve-outdated +``` + +--- + +## 🚪 
Quality Gates & Checks + +### Status Checks + +```yaml +# Required status checks in branch protection +protection_rules: + required_status_checks: + strict: true + contexts: + - "review-swarm/security" + - "review-swarm/performance" + - "review-swarm/architecture" + - "review-swarm/tests" +``` + +### Define Quality Gates + +```bash +# Set quality gate thresholds +npx ruv-swarm github quality-gates \ + --define '{ + "security": {"threshold": "no-critical"}, + "performance": {"regression": "<5%"}, + "coverage": {"minimum": "80%"}, + "architecture": {"complexity": "<10"}, + "duplication": {"maximum": "5%"} + }' +``` + +### Track Review Metrics + +```bash +# Monitor review effectiveness +npx ruv-swarm github review-metrics \ + --period 30d \ + --metrics "issues-found,false-positives,fix-rate,time-to-review" \ + --export-dashboard \ + --format json +``` + +--- + +## 🎓 Advanced Features + +### Context-Aware Reviews + +Analyze PRs with full project context: + +```bash +# Review with comprehensive context +npx ruv-swarm github review-context \ + --pr 123 \ + --load-related-prs \ + --analyze-impact \ + --check-breaking-changes \ + --dependency-analysis +``` + +### Learning from History + +Train review agents on your codebase patterns: + +```bash +# Learn from past reviews +npx ruv-swarm github review-learn \ + --analyze-past-reviews \ + --identify-patterns \ + --improve-suggestions \ + --reduce-false-positives + +# Train on your codebase +npx ruv-swarm github review-train \ + --learn-patterns \ + --adapt-to-style \ + --improve-accuracy +``` + +### Cross-PR Analysis + +Coordinate reviews across related pull requests: + +```bash +# Analyze related PRs together +npx ruv-swarm github review-batch \ + --prs "123,124,125" \ + --check-consistency \ + --verify-integration \ + --combined-impact +``` + +### Multi-PR Swarm Coordination + +```bash +# Coordinate swarms across related PRs +npx ruv-swarm github multi-pr \ + --prs "123,124,125" \ + --strategy "parallel" \ + --share-memory 
+``` + +--- + +## 🛠️ Custom Review Agents + +### Create Custom Agent + +```javascript +// custom-review-agent.js +class CustomReviewAgent { + constructor(config) { + this.config = config; + this.rules = config.rules || []; + } + + async review(pr) { + const issues = []; + + // Custom logic: Check for TODO comments in production code + if (await this.checkTodoComments(pr)) { + issues.push({ + severity: 'warning', + file: pr.file, + line: pr.line, + message: 'TODO comment found in production code', + suggestion: 'Resolve TODO or create issue to track it' + }); + } + + // Custom logic: Verify API versioning + if (await this.checkApiVersioning(pr)) { + issues.push({ + severity: 'error', + file: pr.file, + line: pr.line, + message: 'API endpoint missing versioning', + suggestion: 'Add /v1/, /v2/ prefix to API routes' + }); + } + + return issues; + } + + async checkTodoComments(pr) { + // Implementation + const todoRegex = /\/\/\s*TODO|\/\*\s*TODO/gi; + return todoRegex.test(pr.diff); + } + + async checkApiVersioning(pr) { + // Implementation + const apiRegex = /app\.(get|post|put|delete)\(['"]\/api\/(?!v\d+)/; + return apiRegex.test(pr.diff); + } +} + +module.exports = CustomReviewAgent; +``` + +### Register Custom Agent + +```bash +# Register custom review agent +npx ruv-swarm github register-agent \ + --name "custom-reviewer" \ + --file "./custom-review-agent.js" \ + --category "standards" +``` + +--- + +## 🔧 CI/CD Integration + +### Integration with Build Pipeline + +```yaml +# .github/workflows/build-and-review.yml +name: Build and Review +on: [pull_request] + +jobs: + build-and-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: npm install + - run: npm test + - run: npm run build + + swarm-review: + needs: build-and-test + runs-on: ubuntu-latest + steps: + - name: Run Swarm Review + run: | + npx ruv-swarm github review-all \ + --pr ${{ github.event.pull_request.number }} \ + --include-build-results +``` + +### Automated PR Fixes + 
+```bash +# Auto-fix common issues +npx ruv-swarm github pr-fix 123 \ + --issues "lint,test-failures,formatting" \ + --commit-fixes \ + --push-changes +``` + +### Progress Updates to PR + +```bash +# Post swarm progress to PR using gh CLI +PROGRESS=$(npx ruv-swarm github pr-progress 123 --format markdown) + +gh pr comment 123 --body "$PROGRESS" + +# Update PR labels based on progress +if [[ $(echo "$PROGRESS" | grep -o '[0-9]\+%' | sed 's/%//') -gt 90 ]]; then + gh pr edit 123 --add-label "ready-for-review" +fi +``` + +--- + +## 📋 Complete Workflow Examples + +### Example 1: Security-Critical PR + +```bash +# Review authentication system changes +npx ruv-swarm github review-init \ + --pr 456 \ + --agents "security,authentication,audit" \ + --depth "maximum" \ + --require-security-approval \ + --penetration-test +``` + +### Example 2: Performance-Sensitive PR + +```bash +# Review database optimization +npx ruv-swarm github review-init \ + --pr 789 \ + --agents "performance,database,caching" \ + --benchmark \ + --profile \ + --load-test +``` + +### Example 3: UI Component PR + +```bash +# Review new component library +npx ruv-swarm github review-init \ + --pr 321 \ + --agents "accessibility,style,i18n,docs" \ + --visual-regression \ + --component-tests \ + --responsive-check +``` + +### Example 4: Feature Development PR + +```bash +# Review new feature implementation +gh pr view 456 --json body,labels,files | \ + npx ruv-swarm github pr-init 456 \ + --topology hierarchical \ + --agents "architect,coder,tester,security" \ + --auto-assign-tasks +``` + +### Example 5: Bug Fix PR + +```bash +# Review bug fix with debugging focus +npx ruv-swarm github pr-init 789 \ + --topology mesh \ + --agents "debugger,analyst,tester" \ + --priority high \ + --regression-test +``` + +--- + +## 📊 Monitoring & Analytics + +### Review Dashboard + +```bash +# Launch real-time review dashboard +npx ruv-swarm github review-dashboard \ + --real-time \ + --show 
"agent-activity,issue-trends,fix-rates,coverage" +``` + +### Generate Review Reports + +```bash +# Create comprehensive review report +npx ruv-swarm github review-report \ + --format "markdown" \ + --include "summary,details,trends,recommendations" \ + --email-stakeholders \ + --export-pdf +``` + +### PR Swarm Analytics + +```bash +# Generate PR-specific analytics +npx ruv-swarm github pr-report 123 \ + --metrics "completion-time,agent-efficiency,token-usage,issue-density" \ + --format markdown \ + --compare-baseline +``` + +### Export to GitHub Insights + +```bash +# Export metrics to GitHub Insights +npx ruv-swarm github export-metrics \ + --pr 123 \ + --to-insights \ + --dashboard-url +``` + +--- + +## 🔐 Security Considerations + +### Best Practices + +1. **Token Permissions**: Ensure GitHub tokens have minimal required scopes +2. **Command Validation**: Validate all PR comments before execution +3. **Rate Limiting**: Implement rate limits for PR operations +4. **Audit Trail**: Log all swarm operations for compliance +5. **Secret Management**: Never expose API keys in PR comments or logs + +### Security Checklist + +- [ ] GitHub token scoped to repository only +- [ ] Webhook signatures verified +- [ ] Command injection protection enabled +- [ ] Rate limiting configured +- [ ] Audit logging enabled +- [ ] Secrets scanning active +- [ ] Branch protection rules enforced + +--- + +## 📚 Best Practices + +### 1. Review Configuration +- ✅ Define clear review criteria upfront +- ✅ Set appropriate severity thresholds +- ✅ Configure agent specializations for your stack +- ✅ Establish override procedures for emergencies + +### 2. Comment Quality +- ✅ Provide actionable, specific feedback +- ✅ Include code examples with suggestions +- ✅ Reference documentation and best practices +- ✅ Maintain respectful, constructive tone + +### 3. 
Performance Optimization +- ✅ Cache analysis results to avoid redundant work +- ✅ Use incremental reviews for large PRs +- ✅ Enable parallel agent execution +- ✅ Batch comment operations efficiently + +### 4. PR Templates + +```markdown + +## Swarm Configuration +- Topology: [mesh/hierarchical/ring/star] +- Max Agents: [number] +- Auto-spawn: [yes/no] +- Priority: [high/medium/low] + +## Tasks for Swarm +- [ ] Task 1 description +- [ ] Task 2 description +- [ ] Task 3 description + +## Review Focus Areas +- [ ] Security review +- [ ] Performance analysis +- [ ] Architecture validation +- [ ] Accessibility check +``` + +### 5. Auto-Merge When Ready + +```bash +# Auto-merge when swarm completes and passes checks +SWARM_STATUS=$(npx ruv-swarm github pr-status 123) + +if [[ "$SWARM_STATUS" == "complete" ]]; then + # Check review requirements + REVIEWS=$(gh pr view 123 --json reviews --jq '.reviews | length') + + if [[ $REVIEWS -ge 2 ]]; then + # Enable auto-merge + gh pr merge 123 --auto --squash + fi +fi +``` + +--- + +## 🔗 Integration with Codex + +### Workflow Pattern + +1. **Codex** reads PR diff and context +2. **Swarm** coordinates review approach based on PR type +3. **Agents** work in parallel on different review aspects +4. **Progress** updates posted to PR automatically +5. 
**Final review** performed before marking ready + +### Example: Complete PR Management + +```javascript +[Single Message - Parallel Execution]: + // Initialize coordination + mcp__claude-flow__swarm_init { topology: "hierarchical", maxAgents: 5 } + mcp__claude-flow__agent_spawn { type: "reviewer", name: "Senior Reviewer" } + mcp__claude-flow__agent_spawn { type: "tester", name: "QA Engineer" } + mcp__claude-flow__agent_spawn { type: "coordinator", name: "Merge Coordinator" } + + // Create and manage PR using gh CLI + Bash("gh pr create --title 'Feature: Add authentication' --base main") + Bash("gh pr view 54 --json files,diff") + Bash("gh pr review 54 --approve --body 'LGTM after automated review'") + + // Execute tests and validation + Bash("npm test") + Bash("npm run lint") + Bash("npm run build") + + // Track progress + TodoWrite { todos: [ + { content: "Complete code review", status: "completed", activeForm: "Completing code review" }, + { content: "Run test suite", status: "completed", activeForm: "Running test suite" }, + { content: "Validate security", status: "completed", activeForm: "Validating security" }, + { content: "Merge when ready", status: "pending", activeForm: "Merging when ready" } + ]} +``` + +--- + +## 🆘 Troubleshooting + +### Common Issues + +
+Issue: Review agents not spawning + +**Solution:** +```bash +# Check swarm status +npx ruv-swarm swarm-status + +# Verify GitHub CLI authentication +gh auth status + +# Re-initialize swarm +npx ruv-swarm github review-init --pr 123 --force +``` + +
+ +
+Issue: Comments not posting to PR + +**Solution:** +```bash +# Verify GitHub token permissions +gh auth status + +# Check API rate limits +gh api rate_limit + +# Use batch comment posting +npx ruv-swarm github review-comments --pr 123 --batch +``` + +
+ +
+Issue: Review taking too long + +**Solution:** +```bash +# Use incremental review for large PRs +npx ruv-swarm github review-init --pr 123 --incremental + +# Reduce agent count +npx ruv-swarm github review-init --pr 123 --agents "security,style" --max-agents 3 + +# Enable parallel processing +npx ruv-swarm github review-init --pr 123 --parallel --cache-results +``` + +
+ +--- + +## 📖 Additional Resources + +### Related Skills +- `github-pr-manager` - Comprehensive PR lifecycle management +- `github-workflow-automation` - Automate GitHub workflows +- `swarm-coordination` - Advanced swarm orchestration + +### Documentation +- [GitHub CLI Documentation](https://cli.github.com/manual/) +- [RUV Swarm Guide](https://github.com/ruvnet/ruv-swarm) +- [Codex Flow Integration](https://github.com/ruvnet/Codex-flow) + +### Support +- GitHub Issues: Report bugs and request features +- Community: Join discussions and share experiences +- Examples: Browse example configurations and workflows + +--- + +## 📄 License + +This skill is part of the Codex Flow project and is licensed under the MIT License. + +--- + +**Last Updated:** 2025-10-19 +**Version:** 1.0.0 +**Maintainer:** Codex Flow Team diff --git a/.agents/skills/github-multi-repo/SKILL.md b/.agents/skills/github-multi-repo/SKILL.md new file mode 100644 index 0000000..7df526a --- /dev/null +++ b/.agents/skills/github-multi-repo/SKILL.md @@ -0,0 +1,874 @@ +--- +name: github-multi-repo +version: 1.0.0 +description: Multi-repository coordination, synchronization, and architecture management with AI swarm orchestration +category: github-integration +tags: [multi-repo, synchronization, architecture, coordination, github] +author: Codex Flow Team +requires: + - ruv-swarm@^1.0.11 + - gh-cli@^2.0.0 +capabilities: + - cross-repository coordination + - package synchronization + - architecture optimization + - template management + - distributed workflows +--- + +# GitHub Multi-Repository Coordination Skill + +## Overview + +Advanced multi-repository coordination system that combines swarm intelligence, package synchronization, and repository architecture optimization. This skill enables organization-wide automation, cross-project collaboration, and scalable repository management. 
+ +## Core Capabilities + +### 🔄 Multi-Repository Swarm Coordination +Cross-repository AI swarm orchestration for distributed development workflows. + +### 📦 Package Synchronization +Intelligent dependency resolution and version alignment across multiple packages. + +### 🏗️ Repository Architecture +Structure optimization and template management for scalable projects. + +### 🔗 Integration Management +Cross-package integration testing and deployment coordination. + +## Quick Start + +### Initialize Multi-Repo Coordination +```bash +# Basic swarm initialization +npx Codex-flow skill run github-multi-repo init \ + --repos "org/frontend,org/backend,org/shared" \ + --topology hierarchical + +# Advanced initialization with synchronization +npx Codex-flow skill run github-multi-repo init \ + --repos "org/frontend,org/backend,org/shared" \ + --topology mesh \ + --shared-memory \ + --sync-strategy eventual +``` + +### Synchronize Packages +```bash +# Synchronize package versions and dependencies +npx Codex-flow skill run github-multi-repo sync \ + --packages "Codex-flow,ruv-swarm" \ + --align-versions \ + --update-docs +``` + +### Optimize Architecture +```bash +# Analyze and optimize repository structure +npx Codex-flow skill run github-multi-repo optimize \ + --analyze-structure \ + --suggest-improvements \ + --create-templates +``` + +## Features + +### 1. 
Cross-Repository Swarm Orchestration + +#### Repository Discovery +```javascript +// Auto-discover related repositories with gh CLI +const REPOS = Bash(`gh repo list my-organization --limit 100 \ + --json name,description,languages,topics \ + --jq '.[] | select(.languages | keys | contains(["TypeScript"]))'`) + +// Analyze repository dependencies +const DEPS = Bash(`gh repo list my-organization --json name | \ + jq -r '.[].name' | while read -r repo; do + gh api repos/my-organization/$repo/contents/package.json \ + --jq '.content' 2>/dev/null | base64 -d | jq '{name, dependencies}' + done | jq -s '.'`) + +// Initialize swarm with discovered repositories +mcp__claude-flow__swarm_init({ + topology: "hierarchical", + maxAgents: 8, + metadata: { repos: REPOS, dependencies: DEPS } +}) +``` + +#### Synchronized Operations +```javascript +// Execute synchronized changes across repositories +[Parallel Multi-Repo Operations]: + // Spawn coordination agents + Task("Repository Coordinator", "Coordinate changes across all repositories", "coordinator") + Task("Dependency Analyzer", "Analyze cross-repo dependencies", "analyst") + Task("Integration Tester", "Validate cross-repo changes", "tester") + + // Get matching repositories + Bash(`gh repo list org --limit 100 --json name \ + --jq '.[] | select(.name | test("-service$")) | .name' > /tmp/repos.txt`) + + // Execute task across repositories + Bash(`cat /tmp/repos.txt | while read -r repo; do + gh repo clone org/$repo /tmp/$repo -- --depth=1 + cd /tmp/$repo + + # Apply changes + npm update + npm test + + # Create PR if successful + if [ $? 
-eq 0 ]; then + git checkout -b update-dependencies-$(date +%Y%m%d) + git add -A + git commit -m "chore: Update dependencies" + git push origin HEAD + gh pr create --title "Update dependencies" --body "Automated update" --label "dependencies" + fi + done`) + + // Track all operations + TodoWrite { todos: [ + { id: "discover", content: "Discover all service repositories", status: "completed" }, + { id: "update", content: "Update dependencies", status: "completed" }, + { id: "test", content: "Run integration tests", status: "in_progress" }, + { id: "pr", content: "Create pull requests", status: "pending" } + ]} +``` + +### 2. Package Synchronization + +#### Version Alignment +```javascript +// Synchronize package dependencies and versions +[Complete Package Sync]: + // Initialize sync swarm + mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 5 }) + + // Spawn sync agents + Task("Sync Coordinator", "Coordinate version alignment", "coordinator") + Task("Dependency Analyzer", "Analyze dependencies", "analyst") + Task("Integration Tester", "Validate synchronization", "tester") + + // Read package states + Read("/workspaces/ruv-FANN/Codex-flow/Codex-flow/package.json") + Read("/workspaces/ruv-FANN/ruv-swarm/npm/package.json") + + // Align versions using gh CLI + Bash(`gh api repos/:owner/:repo/git/refs \ + -f ref='refs/heads/sync/package-alignment' \ + -f sha=$(gh api repos/:owner/:repo/git/refs/heads/main --jq '.object.sha')`) + + // Update package.json files + Bash(`gh api repos/:owner/:repo/contents/package.json \ + --method PUT \ + -f message="feat: Align Node.js version requirements" \ + -f branch="sync/package-alignment" \ + -f content="$(cat aligned-package.json | base64)"`) + + // Store sync state + mcp__claude-flow__memory_usage({ + action: "store", + key: "sync/packages/status", + value: { + timestamp: Date.now(), + packages_synced: ["Codex-flow", "ruv-swarm"], + status: "synchronized" + } + }) +``` + +#### Documentation Synchronization +```javascript 
+// Synchronize AGENTS.md files across packages +[Documentation Sync]: + // Get source documentation + Bash(`gh api repos/:owner/:repo/contents/ruv-swarm/docs/AGENTS.md \ + --jq '.content' | base64 -d > /tmp/Codex-source.md`) + + // Update target documentation + Bash(`gh api repos/:owner/:repo/contents/Codex-flow/AGENTS.md \ + --method PUT \ + -f message="docs: Synchronize AGENTS.md" \ + -f branch="sync/documentation" \ + -f content="$(cat /tmp/Codex-source.md | base64)"`) + + // Track sync status + mcp__claude-flow__memory_usage({ + action: "store", + key: "sync/documentation/status", + value: { status: "synchronized", files: ["AGENTS.md"] } + }) +``` + +#### Cross-Package Integration +```javascript +// Coordinate feature implementation across packages +[Cross-Package Feature]: + // Push changes to all packages + mcp__github__push_files({ + branch: "feature/github-integration", + files: [ + { + path: "Codex-flow/.Codex/commands/github/github-modes.md", + content: "[GitHub modes documentation]" + }, + { + path: "ruv-swarm/src/github-coordinator/hooks.js", + content: "[GitHub coordination hooks]" + } + ], + message: "feat: Add GitHub workflow integration" + }) + + // Create coordinated PR + Bash(`gh pr create \ + --title "Feature: GitHub Workflow Integration" \ + --body "## 🚀 GitHub Integration + +### Features +- ✅ Multi-repo coordination +- ✅ Package synchronization +- ✅ Architecture optimization + +### Testing +- [x] Package dependency verification +- [x] Integration tests +- [x] Cross-package compatibility"`) +``` + +### 3. 
Repository Architecture + +#### Structure Analysis +```javascript +// Analyze and optimize repository structure +[Architecture Analysis]: + // Initialize architecture swarm + mcp__claude-flow__swarm_init({ topology: "hierarchical", maxAgents: 6 }) + + // Spawn architecture agents + Task("Senior Architect", "Analyze repository structure", "architect") + Task("Structure Analyst", "Identify optimization opportunities", "analyst") + Task("Performance Optimizer", "Optimize structure for scalability", "optimizer") + Task("Best Practices Researcher", "Research architecture patterns", "researcher") + + // Analyze current structures + LS("/workspaces/ruv-FANN/Codex-flow/Codex-flow") + LS("/workspaces/ruv-FANN/ruv-swarm/npm") + + // Search for best practices + Bash(`gh search repos "language:javascript template architecture" \ + --limit 10 \ + --json fullName,description,stargazersCount \ + --sort stars \ + --order desc`) + + // Store analysis results + mcp__claude-flow__memory_usage({ + action: "store", + key: "architecture/analysis/results", + value: { + repositories_analyzed: ["Codex-flow", "ruv-swarm"], + optimization_areas: ["structure", "workflows", "templates"], + recommendations: ["standardize_structure", "improve_workflows"] + } + }) +``` + +#### Template Creation +```javascript +// Create standardized repository template +[Template Creation]: + // Create template repository + mcp__github__create_repository({ + name: "Codex-project-template", + description: "Standardized template for Codex projects", + private: false, + autoInit: true + }) + + // Push template structure + mcp__github__push_files({ + repo: "Codex-project-template", + files: [ + { + path: ".Codex/commands/github/github-modes.md", + content: "[GitHub modes template]" + }, + { + path: ".Codex/config.json", + content: JSON.stringify({ + version: "1.0", + mcp_servers: { + "ruv-swarm": { + command: "npx", + args: ["ruv-swarm", "mcp", "start"] + } + } + }) + }, + { + path: "AGENTS.md", + content: 
"[Standardized AGENTS.md]" + }, + { + path: "package.json", + content: JSON.stringify({ + name: "Codex-project-template", + engines: { node: ">=20.0.0" }, + dependencies: { "ruv-swarm": "^1.0.11" } + }) + } + ], + message: "feat: Create standardized template" + }) +``` + +#### Cross-Repository Standardization +```javascript +// Synchronize structure across repositories +[Structure Standardization]: + const repositories = ["Codex-flow", "ruv-swarm", "Codex-extensions"] + + // Update common files across all repositories + repositories.forEach(repo => { + mcp__github__create_or_update_file({ + repo: "ruv-FANN", + path: `${repo}/.github/workflows/integration.yml`, + content: `name: Integration Tests +on: [push, pull_request] +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-node@v3 + with: { node-version: '20' } + - run: npm install && npm test`, + message: "ci: Standardize integration workflow", + branch: "structure/standardization" + }) + }) +``` + +### 4. 
Orchestration Workflows + +#### Dependency Management +```javascript +// Update dependencies across all repositories +[Organization-Wide Dependency Update]: + // Create tracking issue + TRACKING_ISSUE=$(Bash(`gh issue create \ + --title "Dependency Update: typescript@5.0.0" \ + --body "Tracking TypeScript update across all repositories" \ + --label "dependencies,tracking" \ + --json number -q .number`)) + + // Find all TypeScript repositories + TS_REPOS=$(Bash(`gh repo list org --limit 100 --json name | \ + jq -r '.[].name' | while read -r repo; do + if gh api repos/org/$repo/contents/package.json 2>/dev/null | \ + jq -r '.content' | base64 -d | grep -q '"typescript"'; then + echo "$repo" + fi + done`)) + + // Update each repository + Bash(`echo "$TS_REPOS" | while read -r repo; do + gh repo clone org/$repo /tmp/$repo -- --depth=1 + cd /tmp/$repo + + npm install --save-dev typescript@5.0.0 + + if npm test; then + git checkout -b update-typescript-5 + git add package.json package-lock.json + git commit -m "chore: Update TypeScript to 5.0.0 + +Part of #$TRACKING_ISSUE" + + git push origin HEAD + gh pr create \ + --title "Update TypeScript to 5.0.0" \ + --body "Updates TypeScript\n\nTracking: #$TRACKING_ISSUE" \ + --label "dependencies" + else + gh issue comment $TRACKING_ISSUE \ + --body "❌ Failed to update $repo - tests failing" + fi + done`) +``` + +#### Refactoring Operations +```javascript +// Coordinate large-scale refactoring +[Cross-Repo Refactoring]: + // Initialize refactoring swarm + mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 8 }) + + // Spawn specialized agents + Task("Refactoring Coordinator", "Coordinate refactoring across repos", "coordinator") + Task("Impact Analyzer", "Analyze refactoring impact", "analyst") + Task("Code Transformer", "Apply refactoring changes", "coder") + Task("Migration Guide Creator", "Create migration documentation", "documenter") + Task("Integration Tester", "Validate refactored code", "tester") + + // Execute 
refactoring + mcp__claude-flow__task_orchestrate({ + task: "Rename OldAPI to NewAPI across all repositories", + strategy: "sequential", + priority: "high" + }) +``` + +#### Security Updates +```javascript +// Coordinate security patches +[Security Patch Deployment]: + // Scan all repositories + Bash(`gh repo list org --limit 100 --json name | jq -r '.[].name' | \ + while read -r repo; do + gh repo clone org/$repo /tmp/$repo -- --depth=1 + cd /tmp/$repo + npm audit --json > /tmp/audit-$repo.json + done`) + + // Apply patches + Bash(`for repo in /tmp/audit-*.json; do + if [ $(jq '.vulnerabilities | length' $repo) -gt 0 ]; then + cd /tmp/$(basename $repo .json | sed 's/audit-//') + npm audit fix + + if npm test; then + git checkout -b security/patch-$(date +%Y%m%d) + git add -A + git commit -m "security: Apply security patches" + git push origin HEAD + gh pr create --title "Security patches" --label "security" + fi + fi + done`) +``` + +## Configuration + +### Multi-Repo Config File +```yaml +# .swarm/multi-repo.yml +version: 1 +organization: my-org + +repositories: + - name: frontend + url: github.com/my-org/frontend + role: ui + agents: [coder, designer, tester] + + - name: backend + url: github.com/my-org/backend + role: api + agents: [architect, coder, tester] + + - name: shared + url: github.com/my-org/shared + role: library + agents: [analyst, coder] + +coordination: + topology: hierarchical + communication: webhook + memory: redis://shared-memory + +dependencies: + - from: frontend + to: [backend, shared] + - from: backend + to: [shared] +``` + +### Repository Roles +```javascript +{ + "roles": { + "ui": { + "responsibilities": ["user-interface", "ux", "accessibility"], + "default-agents": ["designer", "coder", "tester"] + }, + "api": { + "responsibilities": ["endpoints", "business-logic", "data"], + "default-agents": ["architect", "coder", "security"] + }, + "library": { + "responsibilities": ["shared-code", "utilities", "types"], + "default-agents": 
["analyst", "coder", "documenter"] + } + } +} +``` + +## Communication Strategies + +### 1. Webhook-Based Coordination +```javascript +const { MultiRepoSwarm } = require('ruv-swarm'); + +const swarm = new MultiRepoSwarm({ + webhook: { + url: 'https://swarm-coordinator.example.com', + secret: process.env.WEBHOOK_SECRET + } +}); + +swarm.on('repo:update', async (event) => { + await swarm.propagate(event, { + to: event.dependencies, + strategy: 'eventual-consistency' + }); +}); +``` + +### 2. Event Streaming +```yaml +# Kafka configuration for real-time coordination +kafka: + brokers: ['kafka1:9092', 'kafka2:9092'] + topics: + swarm-events: + partitions: 10 + replication: 3 + swarm-memory: + partitions: 5 + replication: 3 +``` + +## Synchronization Patterns + +### 1. Eventually Consistent +```javascript +{ + "sync": { + "strategy": "eventual", + "max-lag": "5m", + "retry": { + "attempts": 3, + "backoff": "exponential" + } + } +} +``` + +### 2. Strong Consistency +```javascript +{ + "sync": { + "strategy": "strong", + "consensus": "raft", + "quorum": 0.51, + "timeout": "30s" + } +} +``` + +### 3. Hybrid Approach +```javascript +{ + "sync": { + "default": "eventual", + "overrides": { + "security-updates": "strong", + "dependency-updates": "strong", + "documentation": "eventual" + } + } +} +``` + +## Use Cases + +### 1. Microservices Coordination +```bash +npx Codex-flow skill run github-multi-repo microservices \ + --services "auth,users,orders,payments" \ + --ensure-compatibility \ + --sync-contracts \ + --integration-tests +``` + +### 2. Library Updates +```bash +npx Codex-flow skill run github-multi-repo lib-update \ + --library "org/shared-lib" \ + --version "2.0.0" \ + --find-consumers \ + --update-imports \ + --run-tests +``` + +### 3. 
Organization-Wide Changes +```bash +npx Codex-flow skill run github-multi-repo org-policy \ + --policy "add-security-headers" \ + --repos "org/*" \ + --validate-compliance \ + --create-reports +``` + +## Architecture Patterns + +### Monorepo Structure +``` +ruv-FANN/ +├── packages/ +│ ├── Codex-flow/ +│ │ ├── src/ +│ │ ├── .Codex/ +│ │ └── package.json +│ ├── ruv-swarm/ +│ │ ├── src/ +│ │ ├── wasm/ +│ │ └── package.json +│ └── shared/ +│ ├── types/ +│ ├── utils/ +│ └── config/ +├── tools/ +│ ├── build/ +│ ├── test/ +│ └── deploy/ +├── docs/ +│ ├── architecture/ +│ ├── integration/ +│ └── examples/ +└── .github/ + ├── workflows/ + ├── templates/ + └── actions/ +``` + +### Command Structure +``` +.Codex/ +├── commands/ +│ ├── github/ +│ │ ├── github-modes.md +│ │ ├── pr-manager.md +│ │ ├── issue-tracker.md +│ │ └── sync-coordinator.md +│ ├── sparc/ +│ │ ├── sparc-modes.md +│ │ ├── coder.md +│ │ └── tester.md +│ └── swarm/ +│ ├── coordination.md +│ └── orchestration.md +├── templates/ +│ ├── issue.md +│ ├── pr.md +│ └── project.md +└── config.json +``` + +## Monitoring & Visualization + +### Multi-Repo Dashboard +```bash +npx Codex-flow skill run github-multi-repo dashboard \ + --port 3000 \ + --metrics "agent-activity,task-progress,memory-usage" \ + --real-time +``` + +### Dependency Graph +```bash +npx Codex-flow skill run github-multi-repo dep-graph \ + --format mermaid \ + --include-agents \ + --show-data-flow +``` + +### Health Monitoring +```bash +npx Codex-flow skill run github-multi-repo health-check \ + --repos "org/*" \ + --check "connectivity,memory,agents" \ + --alert-on-issues +``` + +## Best Practices + +### 1. Repository Organization +- Clear repository roles and boundaries +- Consistent naming conventions +- Documented dependencies +- Shared configuration standards + +### 2. Communication +- Use appropriate sync strategies +- Implement circuit breakers +- Monitor latency and failures +- Clear error propagation + +### 3. 
Security +- Secure cross-repo authentication +- Encrypted communication channels +- Audit trail for all operations +- Principle of least privilege + +### 4. Version Management +- Semantic versioning alignment +- Dependency compatibility validation +- Automated version bump coordination + +### 5. Testing Integration +- Cross-package test validation +- Integration test automation +- Performance regression detection + +## Performance Optimization + +### Caching Strategy +```bash +npx Codex-flow skill run github-multi-repo cache-strategy \ + --analyze-patterns \ + --suggest-cache-layers \ + --implement-invalidation +``` + +### Parallel Execution +```bash +npx Codex-flow skill run github-multi-repo parallel-optimize \ + --analyze-dependencies \ + --identify-parallelizable \ + --execute-optimal +``` + +### Resource Pooling +```bash +npx Codex-flow skill run github-multi-repo resource-pool \ + --share-agents \ + --distribute-load \ + --monitor-usage +``` + +## Troubleshooting + +### Connectivity Issues +```bash +npx Codex-flow skill run github-multi-repo diagnose-connectivity \ + --test-all-repos \ + --check-permissions \ + --verify-webhooks +``` + +### Memory Synchronization +```bash +npx Codex-flow skill run github-multi-repo debug-memory \ + --check-consistency \ + --identify-conflicts \ + --repair-state +``` + +### Performance Bottlenecks +```bash +npx Codex-flow skill run github-multi-repo perf-analysis \ + --profile-operations \ + --identify-bottlenecks \ + --suggest-optimizations +``` + +## Advanced Features + +### 1. Distributed Task Queue +```bash +npx Codex-flow skill run github-multi-repo queue \ + --backend redis \ + --workers 10 \ + --priority-routing \ + --dead-letter-queue +``` + +### 2. Cross-Repo Testing +```bash +npx Codex-flow skill run github-multi-repo test \ + --setup-test-env \ + --link-services \ + --run-e2e \ + --tear-down +``` + +### 3. 
Monorepo Migration +```bash +npx Codex-flow skill run github-multi-repo to-monorepo \ + --analyze-repos \ + --suggest-structure \ + --preserve-history \ + --create-migration-prs +``` + +## Examples + +### Full-Stack Application Update +```bash +npx Codex-flow skill run github-multi-repo fullstack-update \ + --frontend "org/web-app" \ + --backend "org/api-server" \ + --database "org/db-migrations" \ + --coordinate-deployment +``` + +### Cross-Team Collaboration +```bash +npx Codex-flow skill run github-multi-repo cross-team \ + --teams "frontend,backend,devops" \ + --task "implement-feature-x" \ + --assign-by-expertise \ + --track-progress +``` + +## Metrics and Reporting + +### Sync Quality Metrics +- Package version alignment percentage +- Documentation consistency score +- Integration test success rate +- Synchronization completion time + +### Architecture Health Metrics +- Repository structure consistency score +- Documentation coverage percentage +- Cross-repository integration success rate +- Template adoption and usage statistics + +### Automated Reporting +- Weekly sync status reports +- Dependency drift detection +- Documentation divergence alerts +- Integration health monitoring + +## Integration Points + +### Related Skills +- `github-workflow` - GitHub workflow automation +- `github-pr` - Pull request management +- `sparc-architect` - Architecture design +- `sparc-optimizer` - Performance optimization + +### Related Commands +- `/github sync-coordinator` - Cross-repo synchronization +- `/github release-manager` - Coordinated releases +- `/github repo-architect` - Repository optimization +- `/sparc architect` - Detailed architecture design + +## Support and Resources + +- Documentation: https://github.com/ruvnet/Codex-flow +- Issues: https://github.com/ruvnet/Codex-flow/issues +- Examples: `.Codex/examples/github-multi-repo/` + +--- + +**Version:** 1.0.0 +**Last Updated:** 2025-10-19 +**Maintainer:** Codex Flow Team diff --git 
a/.agents/skills/github-project-management/SKILL.md b/.agents/skills/github-project-management/SKILL.md new file mode 100644 index 0000000..1ea768d --- /dev/null +++ b/.agents/skills/github-project-management/SKILL.md @@ -0,0 +1,1277 @@ +--- +name: github-project-management +title: GitHub Project Management +version: 2.0.0 +category: github +description: Comprehensive GitHub project management with swarm-coordinated issue tracking, project board automation, and sprint planning +author: Codex +tags: + - github + - project-management + - issue-tracking + - project-boards + - sprint-planning + - agile + - swarm-coordination +difficulty: intermediate +prerequisites: + - GitHub CLI (gh) installed and authenticated + - ruv-swarm or Codex-flow MCP server configured + - Repository access permissions +tools_required: + - mcp__github__* + - mcp__claude-flow__* + - Bash + - Read + - Write + - TodoWrite +related_skills: + - github-pr-workflow + - github-release-management + - sparc-orchestrator +estimated_time: 30-45 minutes +--- + +# GitHub Project Management + +## Overview + +A comprehensive skill for managing GitHub projects using AI swarm coordination. This skill combines intelligent issue management, automated project board synchronization, and swarm-based coordination for efficient project delivery. + +## Quick Start + +### Basic Issue Creation with Swarm Coordination + +```bash +# Create a coordinated issue +gh issue create \ + --title "Feature: Advanced Authentication" \ + --body "Implement OAuth2 with social login..." 
\ + --label "enhancement,swarm-ready" + +# Initialize swarm for issue +npx Codex-flow@alpha hooks pre-task --description "Feature implementation" +``` + +### Project Board Quick Setup + +```bash +# Get project ID +PROJECT_ID=$(gh project list --owner @me --format json | \ + jq -r '.projects[0].id') + +# Initialize board sync +npx ruv-swarm github board-init \ + --project-id "$PROJECT_ID" \ + --sync-mode "bidirectional" +``` + +--- + +## Core Capabilities + +### 1. Issue Management & Triage + +
+Automated Issue Creation + +#### Single Issue with Swarm Coordination + +```javascript +// Initialize issue management swarm +mcp__claude-flow__swarm_init { topology: "star", maxAgents: 3 } +mcp__claude-flow__agent_spawn { type: "coordinator", name: "Issue Coordinator" } +mcp__claude-flow__agent_spawn { type: "researcher", name: "Requirements Analyst" } +mcp__claude-flow__agent_spawn { type: "coder", name: "Implementation Planner" } + +// Create comprehensive issue +mcp__github__create_issue { + owner: "org", + repo: "repository", + title: "Integration Review: Complete system integration", + body: `## 🔄 Integration Review + + ### Overview + Comprehensive review and integration between components. + + ### Objectives + - [ ] Verify dependencies and imports + - [ ] Ensure API integration + - [ ] Check hook system integration + - [ ] Validate data systems alignment + + ### Swarm Coordination + This issue will be managed by coordinated swarm agents for optimal progress tracking.`, + labels: ["integration", "review", "enhancement"], + assignees: ["username"] +} + +// Set up automated tracking +mcp__claude-flow__task_orchestrate { + task: "Monitor and coordinate issue progress with automated updates", + strategy: "adaptive", + priority: "medium" +} +``` + +#### Batch Issue Creation + +```bash +# Create multiple related issues using gh CLI +gh issue create \ + --title "Feature: Advanced GitHub Integration" \ + --body "Implement comprehensive GitHub workflow automation..." \ + --label "feature,github,high-priority" + +gh issue create \ + --title "Bug: Merge conflicts in integration branch" \ + --body "Resolve merge conflicts..." \ + --label "bug,integration,urgent" + +gh issue create \ + --title "Documentation: Update integration guides" \ + --body "Update all documentation..." \ + --label "documentation,integration" +``` + +
+ +
+Issue-to-Swarm Conversion + +#### Transform Issues into Swarm Tasks + +```bash +# Get issue details +ISSUE_DATA=$(gh issue view 456 --json title,body,labels,assignees,comments) + +# Create swarm from issue +npx ruv-swarm github issue-to-swarm 456 \ + --issue-data "$ISSUE_DATA" \ + --auto-decompose \ + --assign-agents + +# Batch process multiple issues +ISSUES=$(gh issue list --label "swarm-ready" --json number,title,body,labels) +npx ruv-swarm github issues-batch \ + --issues "$ISSUES" \ + --parallel + +# Update issues with swarm status +echo "$ISSUES" | jq -r '.[].number' | while read -r num; do + gh issue edit $num --add-label "swarm-processing" +done +``` + +#### Issue Comment Commands + +Execute swarm operations via issue comments: + +```markdown + +/swarm analyze +/swarm decompose 5 +/swarm assign @agent-coder +/swarm estimate +/swarm start +``` + +
+ +
+Automated Issue Triage + +#### Auto-Label Based on Content + +```javascript +// .github/swarm-labels.json +{ + "rules": [ + { + "keywords": ["bug", "error", "broken"], + "labels": ["bug", "swarm-debugger"], + "agents": ["debugger", "tester"] + }, + { + "keywords": ["feature", "implement", "add"], + "labels": ["enhancement", "swarm-feature"], + "agents": ["architect", "coder", "tester"] + }, + { + "keywords": ["slow", "performance", "optimize"], + "labels": ["performance", "swarm-optimizer"], + "agents": ["analyst", "optimizer"] + } + ] +} +``` + +#### Automated Triage System + +```bash +# Analyze and triage unlabeled issues +npx ruv-swarm github triage \ + --unlabeled \ + --analyze-content \ + --suggest-labels \ + --assign-priority + +# Find and link duplicate issues +npx ruv-swarm github find-duplicates \ + --threshold 0.8 \ + --link-related \ + --close-duplicates +``` + +
+ +
+Task Decomposition & Progress Tracking + +#### Break Down Issues into Subtasks + +```bash +# Get issue body +ISSUE_BODY=$(gh issue view 456 --json body --jq '.body') + +# Decompose into subtasks +SUBTASKS=$(npx ruv-swarm github issue-decompose 456 \ + --body "$ISSUE_BODY" \ + --max-subtasks 10 \ + --assign-priorities) + +# Update issue with checklist +CHECKLIST=$(echo "$SUBTASKS" | jq -r '.tasks[] | "- [ ] " + .description') +UPDATED_BODY="$ISSUE_BODY + +## Subtasks +$CHECKLIST" + +gh issue edit 456 --body "$UPDATED_BODY" + +# Create linked issues for major subtasks +echo "$SUBTASKS" | jq -r '.tasks[] | select(.priority == "high")' | while read -r task; do + TITLE=$(echo "$task" | jq -r '.title') + BODY=$(echo "$task" | jq -r '.description') + + gh issue create \ + --title "$TITLE" \ + --body "$BODY + +Parent issue: #456" \ + --label "subtask" +done +``` + +#### Automated Progress Updates + +```bash +# Get current issue state +CURRENT=$(gh issue view 456 --json body,labels) + +# Get swarm progress +PROGRESS=$(npx ruv-swarm github issue-progress 456) + +# Update checklist in issue body +UPDATED_BODY=$(echo "$CURRENT" | jq -r '.body' | \ + npx ruv-swarm github update-checklist --progress "$PROGRESS") + +# Edit issue with updated body +gh issue edit 456 --body "$UPDATED_BODY" + +# Post progress summary as comment +SUMMARY=$(echo "$PROGRESS" | jq -r ' +"## 📊 Progress Update + +**Completion**: \(.completion)% +**ETA**: \(.eta) + +### Completed Tasks +\(.completed | map("- ✅ " + .) | join("\n")) + +### In Progress +\(.in_progress | map("- 🔄 " + .) | join("\n")) + +### Remaining +\(.remaining | map("- ⏳ " + .) | join("\n")) + +--- +🤖 Automated update by swarm agent"') + +gh issue comment 456 --body "$SUMMARY" + +# Update labels based on progress +if [[ $(echo "$PROGRESS" | jq -r '.completion') -eq 100 ]]; then + gh issue edit 456 --add-label "ready-for-review" --remove-label "in-progress" +fi +``` + +
+ +
+Stale Issue Management + +#### Auto-Close Stale Issues with Swarm Analysis + +```bash +# Find stale issues +STALE_DATE=$(date -d '30 days ago' --iso-8601) +STALE_ISSUES=$(gh issue list --state open --json number,title,updatedAt,labels \ + --jq ".[] | select(.updatedAt < \"$STALE_DATE\")") + +# Analyze each stale issue +echo "$STALE_ISSUES" | jq -r '.number' | while read -r num; do + # Get full issue context + ISSUE=$(gh issue view $num --json title,body,comments,labels) + + # Analyze with swarm + ACTION=$(npx ruv-swarm github analyze-stale \ + --issue "$ISSUE" \ + --suggest-action) + + case "$ACTION" in + "close") + gh issue comment $num --body "This issue has been inactive for 30 days and will be closed in 7 days if there's no further activity." + gh issue edit $num --add-label "stale" + ;; + "keep") + gh issue edit $num --remove-label "stale" 2>/dev/null || true + ;; + "needs-info") + gh issue comment $num --body "This issue needs more information. Please provide additional context or it may be closed as stale." + gh issue edit $num --add-label "needs-info" + ;; + esac +done + +# Close issues that have been stale for 37+ days +gh issue list --label stale --state open --json number,updatedAt \ + --jq ".[] | select(.updatedAt < \"$(date -d '37 days ago' --iso-8601)\") | .number" | \ + while read -r num; do + gh issue close $num --comment "Closing due to inactivity. Feel free to reopen if this is still relevant." + done +``` + +
+ +### 2. Project Board Automation + +
+Board Initialization & Configuration + +#### Connect Swarm to GitHub Project + +```bash +# Get project details +PROJECT_ID=$(gh project list --owner @me --format json | \ + jq -r '.projects[] | select(.title == "Development Board") | .id') + +# Initialize swarm with project +npx ruv-swarm github board-init \ + --project-id "$PROJECT_ID" \ + --sync-mode "bidirectional" \ + --create-views "swarm-status,agent-workload,priority" + +# Create project fields for swarm tracking +gh project field-create $PROJECT_ID --owner @me \ + --name "Swarm Status" \ + --data-type "SINGLE_SELECT" \ + --single-select-options "pending,in_progress,completed" +``` + +#### Board Mapping Configuration + +```yaml +# .github/board-sync.yml +version: 1 +project: + name: "AI Development Board" + number: 1 + +mapping: + # Map swarm task status to board columns + status: + pending: "Backlog" + assigned: "Ready" + in_progress: "In Progress" + review: "Review" + completed: "Done" + blocked: "Blocked" + + # Map agent types to labels + agents: + coder: "🔧 Development" + tester: "🧪 Testing" + analyst: "📊 Analysis" + designer: "🎨 Design" + architect: "🏗️ Architecture" + + # Map priority to project fields + priority: + critical: "🔴 Critical" + high: "🟡 High" + medium: "🟢 Medium" + low: "⚪ Low" + + # Custom fields + fields: + - name: "Agent Count" + type: number + source: task.agents.length + - name: "Complexity" + type: select + source: task.complexity + - name: "ETA" + type: date + source: task.estimatedCompletion +``` + +
+ +
+Task Synchronization + +#### Real-time Board Sync + +```bash +# Sync swarm tasks with project cards +npx ruv-swarm github board-sync \ + --map-status '{ + "todo": "To Do", + "in_progress": "In Progress", + "review": "Review", + "done": "Done" + }' \ + --auto-move-cards \ + --update-metadata + +# Enable real-time board updates +npx ruv-swarm github board-realtime \ + --webhook-endpoint "https://api.example.com/github-sync" \ + --update-frequency "immediate" \ + --batch-updates false +``` + +#### Convert Issues to Project Cards + +```bash +# List issues with label +ISSUES=$(gh issue list --label "enhancement" --json number,title,body) + +# Add issues to project +echo "$ISSUES" | jq -r '.[].number' | while read -r issue; do + gh project item-add $PROJECT_ID --owner @me --url "https://github.com/$GITHUB_REPOSITORY/issues/$issue" +done + +# Process with swarm +npx ruv-swarm github board-import-issues \ + --issues "$ISSUES" \ + --add-to-column "Backlog" \ + --parse-checklist \ + --assign-agents +``` + +
+ +
+Smart Card Management + +#### Auto-Assignment + +```bash +# Automatically assign cards to agents +npx ruv-swarm github board-auto-assign \ + --strategy "load-balanced" \ + --consider "expertise,workload,availability" \ + --update-cards +``` + +#### Intelligent Card State Transitions + +```bash +# Smart card movement based on rules +npx ruv-swarm github board-smart-move \ + --rules '{ + "auto-progress": "when:all-subtasks-done", + "auto-review": "when:tests-pass", + "auto-done": "when:pr-merged" + }' +``` + +#### Bulk Operations + +```bash +# Bulk card operations +npx ruv-swarm github board-bulk \ + --filter "status:blocked" \ + --action "add-label:needs-attention" \ + --notify-assignees +``` + +
+ +
+Custom Views & Dashboards + +#### View Configuration + +```javascript +// Custom board views +{ + "views": [ + { + "name": "Swarm Overview", + "type": "board", + "groupBy": "status", + "filters": ["is:open"], + "sort": "priority:desc" + }, + { + "name": "Agent Workload", + "type": "table", + "groupBy": "assignedAgent", + "columns": ["title", "status", "priority", "eta"], + "sort": "eta:asc" + }, + { + "name": "Sprint Progress", + "type": "roadmap", + "dateField": "eta", + "groupBy": "milestone" + } + ] +} +``` + +#### Dashboard Configuration + +```javascript +// Dashboard with performance widgets +{ + "dashboard": { + "widgets": [ + { + "type": "chart", + "title": "Task Completion Rate", + "data": "completed-per-day", + "visualization": "line" + }, + { + "type": "gauge", + "title": "Sprint Progress", + "data": "sprint-completion", + "target": 100 + }, + { + "type": "heatmap", + "title": "Agent Activity", + "data": "agent-tasks-per-day" + } + ] + } +} +``` + +
+ +### 3. Sprint Planning & Tracking + +
+Sprint Management + +#### Initialize Sprint with Swarm Coordination + +```bash +# Manage sprints with swarms +npx ruv-swarm github sprint-manage \ + --sprint "Sprint 23" \ + --auto-populate \ + --capacity-planning \ + --track-velocity + +# Track milestone progress +npx ruv-swarm github milestone-track \ + --milestone "v2.0 Release" \ + --update-board \ + --show-dependencies \ + --predict-completion +``` + +#### Agile Development Board Setup + +```bash +# Setup agile board +npx ruv-swarm github agile-board \ + --methodology "scrum" \ + --sprint-length "2w" \ + --ceremonies "planning,review,retro" \ + --metrics "velocity,burndown" +``` + +#### Kanban Flow Board Setup + +```bash +# Setup kanban board +npx ruv-swarm github kanban-board \ + --wip-limits '{ + "In Progress": 5, + "Review": 3 + }' \ + --cycle-time-tracking \ + --continuous-flow +``` + +
+ +
+Progress Tracking & Analytics + +#### Board Analytics + +```bash +# Fetch project data +PROJECT_DATA=$(gh project item-list $PROJECT_ID --owner @me --format json) + +# Get issue metrics +ISSUE_METRICS=$(echo "$PROJECT_DATA" | jq -r '.items[] | select(.content.type == "Issue")' | \ + while read -r item; do + ISSUE_NUM=$(echo "$item" | jq -r '.content.number') + gh issue view $ISSUE_NUM --json createdAt,closedAt,labels,assignees + done) + +# Generate analytics with swarm +npx ruv-swarm github board-analytics \ + --project-data "$PROJECT_DATA" \ + --issue-metrics "$ISSUE_METRICS" \ + --metrics "throughput,cycle-time,wip" \ + --group-by "agent,priority,type" \ + --time-range "30d" \ + --export "dashboard" +``` + +#### Performance Reports + +```bash +# Track and visualize progress +npx ruv-swarm github board-progress \ + --show "burndown,velocity,cycle-time" \ + --time-period "sprint" \ + --export-metrics + +# Generate reports +npx ruv-swarm github board-report \ + --type "sprint-summary" \ + --format "markdown" \ + --include "velocity,burndown,blockers" \ + --distribute "slack,email" +``` + +#### KPI Tracking + +```bash +# Track board performance +npx ruv-swarm github board-kpis \ + --metrics '[ + "average-cycle-time", + "throughput-per-sprint", + "blocked-time-percentage", + "first-time-pass-rate" + ]' \ + --dashboard-url + +# Track team performance +npx ruv-swarm github team-metrics \ + --board "Development" \ + --per-member \ + --include "velocity,quality,collaboration" \ + --anonymous-option +``` + +
+ +
+Release Planning + +#### Release Coordination + +```bash +# Plan releases using board data +npx ruv-swarm github release-plan-board \ + --analyze-velocity \ + --estimate-completion \ + --identify-risks \ + --optimize-scope +``` + +
+ +### 4. Advanced Coordination + +
+Multi-Board Synchronization + +#### Cross-Board Sync + +```bash +# Sync across multiple boards +npx ruv-swarm github multi-board-sync \ + --boards "Development,QA,Release" \ + --sync-rules '{ + "Development->QA": "when:ready-for-test", + "QA->Release": "when:tests-pass" + }' + +# Cross-organization sync +npx ruv-swarm github cross-org-sync \ + --source "org1/Project-A" \ + --target "org2/Project-B" \ + --field-mapping "custom" \ + --conflict-resolution "source-wins" +``` + +
+ +
+Issue Dependencies & Epic Management + +#### Dependency Resolution + +```bash +# Handle issue dependencies +npx ruv-swarm github issue-deps 456 \ + --resolve-order \ + --parallel-safe \ + --update-blocking +``` + +#### Epic Coordination + +```bash +# Coordinate epic-level swarms +npx ruv-swarm github epic-swarm \ + --epic 123 \ + --child-issues "456,457,458" \ + --orchestrate +``` + +
+ +
+Cross-Repository Coordination + +#### Multi-Repo Issue Management + +```bash +# Handle issues across repositories +npx ruv-swarm github cross-repo \ + --issue "org/repo#456" \ + --related "org/other-repo#123" \ + --coordinate +``` + +
+ +
+Team Collaboration + +#### Work Distribution + +```bash +# Distribute work among team +npx ruv-swarm github board-distribute \ + --strategy "skills-based" \ + --balance-workload \ + --respect-preferences \ + --notify-assignments +``` + +#### Standup Automation + +```bash +# Generate standup reports +npx ruv-swarm github standup-report \ + --team "frontend" \ + --include "yesterday,today,blockers" \ + --format "slack" \ + --schedule "daily-9am" +``` + +#### Review Coordination + +```bash +# Coordinate reviews via board +npx ruv-swarm github review-coordinate \ + --board "Code Review" \ + --assign-reviewers \ + --track-feedback \ + --ensure-coverage +``` + +
+ +--- + +## Issue Templates + +### Integration Issue Template + +```markdown +## 🔄 Integration Task + +### Overview +[Brief description of integration requirements] + +### Objectives +- [ ] Component A integration +- [ ] Component B validation +- [ ] Testing and verification +- [ ] Documentation updates + +### Integration Areas +#### Dependencies +- [ ] Package.json updates +- [ ] Version compatibility +- [ ] Import statements + +#### Functionality +- [ ] Core feature integration +- [ ] API compatibility +- [ ] Performance validation + +#### Testing +- [ ] Unit tests +- [ ] Integration tests +- [ ] End-to-end validation + +### Swarm Coordination +- **Coordinator**: Overall progress tracking +- **Analyst**: Technical validation +- **Tester**: Quality assurance +- **Documenter**: Documentation updates + +### Progress Tracking +Updates will be posted automatically by swarm agents during implementation. + +--- +🤖 Generated with Codex +``` + +### Bug Report Template + +```markdown +## 🐛 Bug Report + +### Problem Description +[Clear description of the issue] + +### Expected Behavior +[What should happen] + +### Actual Behavior +[What actually happens] + +### Reproduction Steps +1. [Step 1] +2. [Step 2] +3. [Step 3] + +### Environment +- Package: [package name and version] +- Node.js: [version] +- OS: [operating system] + +### Investigation Plan +- [ ] Root cause analysis +- [ ] Fix implementation +- [ ] Testing and validation +- [ ] Regression testing + +### Swarm Assignment +- **Debugger**: Issue investigation +- **Coder**: Fix implementation +- **Tester**: Validation and testing + +--- +🤖 Generated with Codex +``` + +### Feature Request Template + +```markdown +## ✨ Feature Request + +### Feature Description +[Clear description of the proposed feature] + +### Use Cases +1. [Use case 1] +2. [Use case 2] +3. 
[Use case 3] + +### Acceptance Criteria +- [ ] Criterion 1 +- [ ] Criterion 2 +- [ ] Criterion 3 + +### Implementation Approach +#### Design +- [ ] Architecture design +- [ ] API design +- [ ] UI/UX mockups + +#### Development +- [ ] Core implementation +- [ ] Integration with existing features +- [ ] Performance optimization + +#### Testing +- [ ] Unit tests +- [ ] Integration tests +- [ ] User acceptance testing + +### Swarm Coordination +- **Architect**: Design and planning +- **Coder**: Implementation +- **Tester**: Quality assurance +- **Documenter**: Documentation + +--- +🤖 Generated with Codex +``` + +### Swarm Task Template + +```markdown + +name: Swarm Task +description: Create a task for AI swarm processing +body: + - type: dropdown + id: topology + attributes: + label: Swarm Topology + options: + - mesh + - hierarchical + - ring + - star + - type: input + id: agents + attributes: + label: Required Agents + placeholder: "coder, tester, analyst" + - type: textarea + id: tasks + attributes: + label: Task Breakdown + placeholder: | + 1. Task one description + 2. 
Task two description +``` + +--- + +## Workflow Integration + +### GitHub Actions for Issue Management + +```yaml +# .github/workflows/issue-swarm.yml +name: Issue Swarm Handler +on: + issues: + types: [opened, labeled, commented] + +jobs: + swarm-process: + runs-on: ubuntu-latest + steps: + - name: Process Issue + uses: ruvnet/swarm-action@v1 + with: + command: | + if [[ "${{ github.event.label.name }}" == "swarm-ready" ]]; then + npx ruv-swarm github issue-init ${{ github.event.issue.number }} + fi +``` + +### Board Integration Workflow + +```bash +# Sync with project board +npx ruv-swarm github issue-board-sync \ + --project "Development" \ + --column-mapping '{ + "To Do": "pending", + "In Progress": "active", + "Done": "completed" + }' +``` + +--- + +## Specialized Issue Strategies + +### Bug Investigation Swarm + +```bash +# Specialized bug handling +npx ruv-swarm github bug-swarm 456 \ + --reproduce \ + --isolate \ + --fix \ + --test +``` + +### Feature Implementation Swarm + +```bash +# Feature implementation swarm +npx ruv-swarm github feature-swarm 456 \ + --design \ + --implement \ + --document \ + --demo +``` + +### Technical Debt Refactoring + +```bash +# Refactoring swarm +npx ruv-swarm github debt-swarm 456 \ + --analyze-impact \ + --plan-migration \ + --execute \ + --validate +``` + +--- + +## Best Practices + +### 1. Swarm-Coordinated Issue Management +- Always initialize swarm for complex issues +- Assign specialized agents based on issue type +- Use memory for progress coordination +- Regular automated progress updates + +### 2. Board Organization +- Clear column definitions with consistent naming +- Systematic labeling strategy across repositories +- Regular board grooming and maintenance +- Well-defined automation rules + +### 3. Data Integrity +- Bidirectional sync validation +- Conflict resolution strategies +- Comprehensive audit trails +- Regular backups of project data + +### 4. 
Team Adoption +- Comprehensive training materials +- Clear, documented workflows +- Regular team reviews and retrospectives +- Active feedback loops for improvement + +### 5. Smart Labeling and Organization +- Consistent labeling strategy across repositories +- Priority-based issue sorting and assignment +- Milestone integration for project coordination +- Agent-type to label mapping + +### 6. Automated Progress Tracking +- Regular automated updates with swarm coordination +- Progress metrics and completion tracking +- Cross-issue dependency management +- Real-time status synchronization + +--- + +## Troubleshooting + +### Sync Issues + +```bash +# Diagnose sync problems +npx ruv-swarm github board-diagnose \ + --check "permissions,webhooks,rate-limits" \ + --test-sync \ + --show-conflicts +``` + +### Performance Optimization + +```bash +# Optimize board performance +npx ruv-swarm github board-optimize \ + --analyze-size \ + --archive-completed \ + --index-fields \ + --cache-views +``` + +### Data Recovery + +```bash +# Recover board data +npx ruv-swarm github board-recover \ + --backup-id "2024-01-15" \ + --restore-cards \ + --preserve-current \ + --merge-conflicts +``` + +--- + +## Metrics & Analytics + +### Performance Metrics + +Automatic tracking of: +- Issue creation and resolution times +- Agent productivity metrics +- Project milestone progress +- Cross-repository coordination efficiency +- Sprint velocity and burndown +- Cycle time and throughput +- Work-in-progress limits + +### Reporting Features + +- Weekly progress summaries +- Agent performance analytics +- Project health metrics +- Integration success rates +- Team collaboration metrics +- Quality and defect tracking + +### Issue Resolution Time + +```bash +# Analyze swarm performance +npx ruv-swarm github issue-metrics \ + --issue 456 \ + --metrics "time-to-close,agent-efficiency,subtask-completion" +``` + +### Swarm Effectiveness + +```bash +# Generate effectiveness report +npx ruv-swarm github 
effectiveness \ + --issues "closed:>2024-01-01" \ + --compare "with-swarm,without-swarm" +``` + +--- + +## Security & Permissions + +1. **Command Authorization**: Validate user permissions before executing commands +2. **Rate Limiting**: Prevent spam and abuse of issue commands +3. **Audit Logging**: Track all swarm operations on issues and boards +4. **Data Privacy**: Respect private repository settings +5. **Access Control**: Proper GitHub permissions for board operations +6. **Webhook Security**: Secure webhook endpoints for real-time updates + +--- + +## Integration with Other Skills + +### Seamless Integration With: +- `github-pr-workflow` - Link issues to pull requests automatically +- `github-release-management` - Coordinate release issues and milestones +- `sparc-orchestrator` - Complex project coordination workflows +- `sparc-tester` - Automated testing workflows for issues + +--- + +## Complete Workflow Example + +### Full-Stack Feature Development + +```bash +# 1. Create feature issue with swarm coordination +gh issue create \ + --title "Feature: Real-time Collaboration" \ + --body "$(cat < +npx ruv-swarm github issue-decompose +npx ruv-swarm github triage --unlabeled + +# Project Boards +npx ruv-swarm github board-init --project-id +npx ruv-swarm github board-sync +npx ruv-swarm github board-analytics + +# Sprint Management +npx ruv-swarm github sprint-manage --sprint "Sprint X" +npx ruv-swarm github milestone-track --milestone "vX.X" + +# Analytics +npx ruv-swarm github issue-metrics --issue +npx ruv-swarm github board-kpis +``` + +--- + +## Additional Resources + +- [GitHub CLI Documentation](https://cli.github.com/manual/) +- [GitHub Projects Documentation](https://docs.github.com/en/issues/planning-and-tracking-with-projects) +- [Swarm Coordination Guide](https://github.com/ruvnet/ruv-swarm) +- [Codex Flow Documentation](https://github.com/ruvnet/Codex-flow) + +--- + +**Last Updated**: 2025-10-19 +**Version**: 2.0.0 +**Maintainer**: Codex diff --git 
a/.agents/skills/github-release-management/SKILL.md b/.agents/skills/github-release-management/SKILL.md new file mode 100644 index 0000000..5cc4c81 --- /dev/null +++ b/.agents/skills/github-release-management/SKILL.md @@ -0,0 +1,1081 @@ +--- +name: github-release-management +version: 2.0.0 +description: Comprehensive GitHub release orchestration with AI swarm coordination for automated versioning, testing, deployment, and rollback management +category: github +tags: [release, deployment, versioning, automation, ci-cd, swarm, orchestration] +author: Codex Flow Team +requires: + - gh (GitHub CLI) + - Codex-flow + - ruv-swarm (optional for enhanced coordination) + - mcp-github (optional for MCP integration) +dependencies: + - git + - npm or yarn + - node >= 20.0.0 +related_skills: + - github-pr-management + - github-issue-tracking + - github-workflow-automation + - multi-repo-coordination +--- + +# GitHub Release Management Skill + +Intelligent release automation and orchestration using AI swarms for comprehensive software releases - from changelog generation to multi-platform deployment with rollback capabilities. + +## Quick Start + +### Simple Release Flow +```bash +# Plan and create a release +gh release create v2.0.0 \ + --draft \ + --generate-notes \ + --title "Release v2.0.0" + +# Orchestrate with swarm +npx Codex-flow github release-create \ + --version "2.0.0" \ + --build-artifacts \ + --deploy-targets "npm,docker,github" +``` + +### Full Automated Release +```bash +# Initialize release swarm +npx Codex-flow swarm init --topology hierarchical + +# Execute complete release pipeline +npx Codex-flow sparc pipeline "Release v2.0.0 with full validation" +``` + +--- + +## Core Capabilities + +### 1. Release Planning & Version Management +- Semantic version analysis and suggestion +- Breaking change detection from commits +- Release timeline generation +- Multi-package version coordination + +### 2. 
Automated Testing & Validation +- Multi-stage test orchestration +- Cross-platform compatibility testing +- Performance regression detection +- Security vulnerability scanning + +### 3. Build & Deployment Orchestration +- Multi-platform build coordination +- Parallel artifact generation +- Progressive deployment strategies +- Automated rollback mechanisms + +### 4. Documentation & Communication +- Automated changelog generation +- Release notes with categorization +- Migration guide creation +- Stakeholder notification + +--- + +## Progressive Disclosure: Level 1 - Basic Usage + +### Essential Release Commands + +#### Create Release Draft +```bash +# Get last release tag +LAST_TAG=$(gh release list --limit 1 --json tagName -q '.[0].tagName') + +# Generate changelog from commits +CHANGELOG=$(gh api repos/:owner/:repo/compare/${LAST_TAG}...HEAD \ + --jq '.commits[].commit.message') + +# Create draft release +gh release create v2.0.0 \ + --draft \ + --title "Release v2.0.0" \ + --notes "$CHANGELOG" \ + --target main +``` + +#### Basic Version Bump +```bash +# Update package.json version +npm version patch # or minor, major + +# Push version tag +git push --follow-tags +``` + +#### Simple Deployment +```bash +# Build and publish npm package +npm run build +npm publish + +# Create GitHub release +gh release create $(npm pkg get version) \ + --generate-notes +``` + +### Quick Integration Example +```javascript +// Simple release preparation in Codex +[Single Message]: + // Update version files + Edit("package.json", { old: '"version": "1.0.0"', new: '"version": "2.0.0"' }) + + // Generate changelog + Bash("gh api repos/:owner/:repo/compare/v1.0.0...HEAD --jq '.commits[].commit.message' > CHANGELOG.md") + + // Create release branch + Bash("git checkout -b release/v2.0.0") + Bash("git add -A && git commit -m 'release: Prepare v2.0.0'") + + // Create PR + Bash("gh pr create --title 'Release v2.0.0' --body 'Automated release preparation'") +``` + +--- + +## Progressive 
Disclosure: Level 2 - Swarm Coordination + +### AI Swarm Release Orchestration + +#### Initialize Release Swarm +```javascript +// Set up coordinated release team +[Single Message - Swarm Initialization]: + mcp__claude-flow__swarm_init { + topology: "hierarchical", + maxAgents: 6, + strategy: "balanced" + } + + // Spawn specialized agents + mcp__claude-flow__agent_spawn { type: "coordinator", name: "Release Director" } + mcp__claude-flow__agent_spawn { type: "coder", name: "Version Manager" } + mcp__claude-flow__agent_spawn { type: "tester", name: "QA Engineer" } + mcp__claude-flow__agent_spawn { type: "reviewer", name: "Release Reviewer" } + mcp__claude-flow__agent_spawn { type: "analyst", name: "Deployment Analyst" } + mcp__claude-flow__agent_spawn { type: "researcher", name: "Compatibility Checker" } +``` + +#### Coordinated Release Workflow +```javascript +[Single Message - Full Release Coordination]: + // Create release branch + Bash("gh api repos/:owner/:repo/git/refs --method POST -f ref='refs/heads/release/v2.0.0' -f sha=$(gh api repos/:owner/:repo/git/refs/heads/main --jq '.object.sha')") + + // Orchestrate release preparation + mcp__claude-flow__task_orchestrate { + task: "Prepare release v2.0.0 with comprehensive testing and validation", + strategy: "sequential", + priority: "critical", + maxAgents: 6 + } + + // Update all release files + Write("package.json", "[updated version]") + Write("CHANGELOG.md", "[release changelog]") + Write("RELEASE_NOTES.md", "[detailed notes]") + + // Run comprehensive validation + Bash("npm install && npm test && npm run lint && npm run build") + + // Create release PR + Bash(`gh pr create \ + --title "Release v2.0.0: Feature Set and Improvements" \ + --head "release/v2.0.0" \ + --base "main" \ + --body "$(cat RELEASE_NOTES.md)"`) + + // Track progress + TodoWrite { todos: [ + { content: "Prepare release branch", status: "completed", priority: "critical" }, + { content: "Run validation suite", status: "completed", priority: 
"high" }, + { content: "Create release PR", status: "completed", priority: "high" }, + { content: "Code review approval", status: "pending", priority: "high" }, + { content: "Merge and deploy", status: "pending", priority: "critical" } + ]} + + // Store release state + mcp__claude-flow__memory_usage { + action: "store", + key: "release/v2.0.0/status", + value: JSON.stringify({ + version: "2.0.0", + stage: "validation_complete", + timestamp: Date.now(), + ready_for_review: true + }) + } +``` + +### Release Agent Specializations + +#### Changelog Agent +```bash +# Get merged PRs between versions +PRS=$(gh pr list --state merged --base main --json number,title,labels,author,mergedAt \ + --jq ".[] | select(.mergedAt > \"$(gh release view v1.0.0 --json publishedAt -q .publishedAt)\")") + +# Get commit history +COMMITS=$(gh api repos/:owner/:repo/compare/v1.0.0...HEAD \ + --jq '.commits[].commit.message') + +# Generate categorized changelog +npx Codex-flow github changelog \ + --prs "$PRS" \ + --commits "$COMMITS" \ + --from v1.0.0 \ + --to HEAD \ + --categorize \ + --add-migration-guide +``` + +**Capabilities:** +- Semantic commit analysis +- Breaking change detection +- Contributor attribution +- Migration guide generation +- Multi-language support + +#### Version Agent +```bash +# Intelligent version suggestion +npx Codex-flow github version-suggest \ + --current v1.2.3 \ + --analyze-commits \ + --check-compatibility \ + --suggest-pre-release +``` + +**Logic:** +- Analyzes commit messages and PR labels +- Detects breaking changes via keywords +- Suggests appropriate version bump +- Handles pre-release versioning +- Validates version constraints + +#### Build Agent +```bash +# Multi-platform build coordination +npx Codex-flow github release-build \ + --platforms "linux,macos,windows" \ + --architectures "x64,arm64" \ + --parallel \ + --optimize-size +``` + +**Features:** +- Cross-platform compilation +- Parallel build execution +- Artifact optimization and compression 
+- Dependency bundling +- Build caching and reuse + +#### Test Agent +```bash +# Comprehensive pre-release testing +npx Codex-flow github release-test \ + --suites "unit,integration,e2e,performance" \ + --environments "node:16,node:18,node:20" \ + --fail-fast false \ + --generate-report +``` + +#### Deploy Agent +```bash +# Multi-target deployment orchestration +npx Codex-flow github release-deploy \ + --targets "npm,docker,github,s3" \ + --staged-rollout \ + --monitor-metrics \ + --auto-rollback +``` + +--- + +## Progressive Disclosure: Level 3 - Advanced Workflows + +### Multi-Package Release Coordination + +#### Monorepo Release Strategy +```javascript +[Single Message - Multi-Package Release]: + // Initialize mesh topology for cross-package coordination + mcp__claude-flow__swarm_init { topology: "mesh", maxAgents: 8 } + + // Spawn package-specific agents + Task("Package A Manager", "Coordinate Codex-flow package release v1.0.72", "coder") + Task("Package B Manager", "Coordinate ruv-swarm package release v1.0.12", "coder") + Task("Integration Tester", "Validate cross-package compatibility", "tester") + Task("Version Coordinator", "Align dependencies and versions", "coordinator") + + // Update all packages simultaneously + Write("packages/Codex-flow/package.json", "[v1.0.72 content]") + Write("packages/ruv-swarm/package.json", "[v1.0.12 content]") + Write("CHANGELOG.md", "[consolidated changelog]") + + // Run cross-package validation + Bash("cd packages/Codex-flow && npm install && npm test") + Bash("cd packages/ruv-swarm && npm install && npm test") + Bash("npm run test:integration") + + // Create unified release PR + Bash(`gh pr create \ + --title "Release: Codex-flow v1.0.72, ruv-swarm v1.0.12" \ + --body "Multi-package coordinated release with cross-compatibility validation"`) +``` + +### Progressive Deployment Strategy + +#### Staged Rollout Configuration +```yaml +# .github/release-deployment.yml +deployment: + strategy: progressive + stages: + - name: 
canary + percentage: 5 + duration: 1h + metrics: + - error-rate < 0.1% + - latency-p99 < 200ms + auto-advance: true + + - name: partial + percentage: 25 + duration: 4h + validation: automated-tests + approval: qa-team + + - name: rollout + percentage: 50 + duration: 8h + monitor: true + + - name: full + percentage: 100 + approval: release-manager + rollback-enabled: true +``` + +#### Execute Staged Deployment +```bash +# Deploy with progressive rollout +npx Codex-flow github release-deploy \ + --version v2.0.0 \ + --strategy progressive \ + --config .github/release-deployment.yml \ + --monitor-metrics \ + --auto-rollback-on-error +``` + +### Multi-Repository Coordination + +#### Coordinated Multi-Repo Release +```bash +# Synchronize releases across repositories +npx Codex-flow github multi-release \ + --repos "frontend:v2.0.0,backend:v2.1.0,cli:v1.5.0" \ + --ensure-compatibility \ + --atomic-release \ + --synchronized \ + --rollback-all-on-failure +``` + +#### Cross-Repo Dependency Management +```javascript +[Single Message - Cross-Repo Release]: + // Initialize star topology for centralized coordination + mcp__claude-flow__swarm_init { topology: "star", maxAgents: 6 } + + // Spawn repo-specific coordinators + Task("Frontend Release", "Release frontend v2.0.0 with API compatibility", "coordinator") + Task("Backend Release", "Release backend v2.1.0 with breaking changes", "coordinator") + Task("CLI Release", "Release CLI v1.5.0 with new commands", "coordinator") + Task("Compatibility Checker", "Validate cross-repo compatibility", "researcher") + + // Coordinate version updates across repos + Bash("gh api repos/org/frontend/dispatches --method POST -f event_type='release' -F client_payload[version]=v2.0.0") + Bash("gh api repos/org/backend/dispatches --method POST -f event_type='release' -F client_payload[version]=v2.1.0") + Bash("gh api repos/org/cli/dispatches --method POST -f event_type='release' -F client_payload[version]=v1.5.0") + + // Monitor all releases + 
mcp__claude-flow__swarm_monitor { interval: 5, duration: 300 } +``` + +### Hotfix Emergency Procedures + +#### Emergency Hotfix Workflow +```bash +# Fast-track critical bug fix +npx Codex-flow github emergency-release \ + --issue 789 \ + --severity critical \ + --target-version v1.2.4 \ + --cherry-pick-commits \ + --bypass-checks security-only \ + --fast-track \ + --notify-all +``` + +#### Automated Hotfix Process +```javascript +[Single Message - Emergency Hotfix]: + // Create hotfix branch from last stable release + Bash("git checkout -b hotfix/v1.2.4 v1.2.3") + + // Cherry-pick critical fixes + Bash("git cherry-pick abc123def") + + // Fast validation + Bash("npm run test:critical && npm run build") + + // Create emergency release + Bash(`gh release create v1.2.4 \ + --title "HOTFIX v1.2.4: Critical Security Patch" \ + --notes "Emergency release addressing CVE-2024-XXXX" \ + --prerelease=false`) + + // Immediate deployment + Bash("npm publish --tag hotfix") + + // Notify stakeholders + Bash(`gh issue create \ + --title "🚨 HOTFIX v1.2.4 Deployed" \ + --body "Critical security patch deployed. Please update immediately." 
\ + --label "critical,security,hotfix"`) +``` + +--- + +## Progressive Disclosure: Level 4 - Enterprise Features + +### Release Configuration Management + +#### Comprehensive Release Config +```yaml +# .github/release-swarm.yml +version: 2.0.0 + +release: + versioning: + strategy: semantic + breaking-keywords: ["BREAKING", "BREAKING CHANGE", "!"] + feature-keywords: ["feat", "feature"] + fix-keywords: ["fix", "bugfix"] + + changelog: + sections: + - title: "🚀 Features" + labels: ["feature", "enhancement"] + emoji: true + - title: "🐛 Bug Fixes" + labels: ["bug", "fix"] + - title: "💥 Breaking Changes" + labels: ["breaking"] + highlight: true + - title: "📚 Documentation" + labels: ["docs", "documentation"] + - title: "⚡ Performance" + labels: ["performance", "optimization"] + - title: "🔒 Security" + labels: ["security"] + priority: critical + + artifacts: + - name: npm-package + build: npm run build + test: npm run test:all + publish: npm publish + registry: https://registry.npmjs.org + + - name: docker-image + build: docker build -t app:$VERSION . 
+ test: docker run app:$VERSION npm test + publish: docker push app:$VERSION + platforms: [linux/amd64, linux/arm64] + + - name: binaries + build: ./scripts/build-binaries.sh + platforms: [linux, macos, windows] + architectures: [x64, arm64] + upload: github-release + sign: true + + validation: + pre-release: + - lint: npm run lint + - typecheck: npm run typecheck + - unit-tests: npm run test:unit + - integration-tests: npm run test:integration + - security-scan: npm audit + - license-check: npm run license-check + + post-release: + - smoke-tests: npm run test:smoke + - deployment-validation: ./scripts/validate-deployment.sh + - performance-baseline: npm run benchmark + + deployment: + environments: + - name: staging + auto-deploy: true + validation: npm run test:e2e + approval: false + + - name: production + auto-deploy: false + approval-required: true + approvers: ["release-manager", "tech-lead"] + rollback-enabled: true + health-checks: + - endpoint: /health + expected: 200 + timeout: 30s + + monitoring: + metrics: + - error-rate: <1% + - latency-p95: <500ms + - availability: >99.9% + - memory-usage: <80% + + alerts: + - type: slack + channel: releases + on: [deploy, rollback, error] + - type: email + recipients: ["team@company.com"] + on: [critical-error, rollback] + - type: pagerduty + service: production-releases + on: [critical-error] + + rollback: + auto-rollback: + triggers: + - error-rate > 5% + - latency-p99 > 2000ms + - availability < 99% + grace-period: 5m + + manual-rollback: + preserve-data: true + notify-users: true + create-incident: true +``` + +### Advanced Testing Strategies + +#### Comprehensive Validation Suite +```bash +# Pre-release validation with all checks +npx Codex-flow github release-validate \ + --checks " + version-conflicts, + dependency-compatibility, + api-breaking-changes, + security-vulnerabilities, + performance-regression, + documentation-completeness, + license-compliance, + backwards-compatibility + " \ + --block-on-failure 
\ + --generate-report \ + --upload-results +``` + +#### Backward Compatibility Testing +```bash +# Test against previous versions +npx Codex-flow github compat-test \ + --previous-versions "v1.0,v1.1,v1.2" \ + --api-contracts \ + --data-migrations \ + --integration-tests \ + --generate-report +``` + +#### Performance Regression Detection +```bash +# Benchmark against baseline +npx Codex-flow github performance-test \ + --baseline v1.9.0 \ + --candidate v2.0.0 \ + --metrics "throughput,latency,memory,cpu" \ + --threshold 5% \ + --fail-on-regression +``` + +### Release Monitoring & Analytics + +#### Real-Time Release Monitoring +```bash +# Monitor release health post-deployment +npx Codex-flow github release-monitor \ + --version v2.0.0 \ + --metrics "error-rate,latency,throughput,adoption" \ + --alert-thresholds \ + --duration 24h \ + --export-dashboard +``` + +#### Release Analytics & Insights +```bash +# Analyze release performance and adoption +npx Codex-flow github release-analytics \ + --version v2.0.0 \ + --compare-with v1.9.0 \ + --metrics "adoption,performance,stability,feedback" \ + --generate-insights \ + --export-report +``` + +#### Automated Rollback Configuration +```bash +# Configure intelligent auto-rollback +npx Codex-flow github rollback-config \ + --triggers '{ + "error-rate": ">5%", + "latency-p99": ">1000ms", + "availability": "<99.9%", + "failed-health-checks": ">3" + }' \ + --grace-period 5m \ + --notify-on-rollback \ + --preserve-metrics +``` + +### Security & Compliance + +#### Security Scanning +```bash +# Comprehensive security validation +npx Codex-flow github release-security \ + --scan-dependencies \ + --check-secrets \ + --audit-permissions \ + --sign-artifacts \ + --sbom-generation \ + --vulnerability-report +``` + +#### Compliance Validation +```bash +# Ensure regulatory compliance +npx Codex-flow github release-compliance \ + --standards "SOC2,GDPR,HIPAA" \ + --license-audit \ + --data-governance \ + --audit-trail \ + 
--generate-attestation +``` + +--- + +## GitHub Actions Integration + +### Complete Release Workflow +```yaml +# .github/workflows/release.yml +name: Intelligent Release Workflow +on: + push: + tags: ['v*'] + +jobs: + release-orchestration: + runs-on: ubuntu-latest + permissions: + contents: write + packages: write + issues: write + + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: '20' + cache: 'npm' + + - name: Authenticate GitHub CLI + run: echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token + + - name: Initialize Release Swarm + run: | + # Extract version from tag + RELEASE_TAG=${{ github.ref_name }} + PREV_TAG=$(gh release list --limit 2 --json tagName -q '.[1].tagName') + + # Get merged PRs for changelog + PRS=$(gh pr list --state merged --base main --json number,title,labels,author,mergedAt \ + --jq ".[] | select(.mergedAt > \"$(gh release view $PREV_TAG --json publishedAt -q .publishedAt)\")") + + # Get commit history + COMMITS=$(gh api repos/${{ github.repository }}/compare/${PREV_TAG}...HEAD \ + --jq '.commits[].commit.message') + + # Initialize swarm coordination + npx Codex-flow@alpha swarm init --topology hierarchical + + # Store release context + echo "$PRS" > /tmp/release-prs.json + echo "$COMMITS" > /tmp/release-commits.txt + + - name: Generate Release Changelog + run: | + # Generate intelligent changelog + CHANGELOG=$(npx Codex-flow@alpha github changelog \ + --prs "$(cat /tmp/release-prs.json)" \ + --commits "$(cat /tmp/release-commits.txt)" \ + --from $PREV_TAG \ + --to $RELEASE_TAG \ + --categorize \ + --add-migration-guide \ + --format markdown) + + echo "$CHANGELOG" > RELEASE_CHANGELOG.md + + - name: Build Release Artifacts + run: | + # Install dependencies + npm ci + + # Run comprehensive validation + npm run lint + npm run typecheck + npm run test:all + npm run build + + # Build platform-specific binaries + 
npx Codex-flow@alpha github release-build \ + --platforms "linux,macos,windows" \ + --architectures "x64,arm64" \ + --parallel + + - name: Security Scan + run: | + # Run security validation + npm audit --audit-level=moderate + + npx Codex-flow@alpha github release-security \ + --scan-dependencies \ + --check-secrets \ + --sign-artifacts + + - name: Create GitHub Release + run: | + # Update release with generated changelog + gh release edit ${{ github.ref_name }} \ + --notes "$(cat RELEASE_CHANGELOG.md)" \ + --draft=false + + # Upload all artifacts + for file in dist/*; do + gh release upload ${{ github.ref_name }} "$file" + done + + - name: Deploy to Package Registries + run: | + # Publish to npm + echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > .npmrc + npm publish + + # Build and push Docker images + docker build -t ${{ github.repository }}:${{ github.ref_name }} . + docker push ${{ github.repository }}:${{ github.ref_name }} + + - name: Post-Release Validation + run: | + # Run smoke tests + npm run test:smoke + + # Validate deployment + npx Codex-flow@alpha github release-validate \ + --version ${{ github.ref_name }} \ + --smoke-tests \ + --health-checks + + - name: Create Release Announcement + run: | + # Create announcement issue + gh issue create \ + --title "🎉 Released ${{ github.ref_name }}" \ + --body "$(cat RELEASE_CHANGELOG.md)" \ + --label "announcement,release" + + # Notify via discussion + gh api repos/${{ github.repository }}/discussions \ + --method POST \ + -f title="Release ${{ github.ref_name }} Now Available" \ + -f body="$(cat RELEASE_CHANGELOG.md)" \ + -f category_id="$(gh api repos/${{ github.repository }}/discussions/categories --jq '.[] | select(.slug=="announcements") | .id')" + + - name: Monitor Release + run: | + # Start release monitoring + npx Codex-flow@alpha github release-monitor \ + --version ${{ github.ref_name }} \ + --duration 1h \ + --alert-on-errors & +``` + +### Hotfix Workflow +```yaml +# 
.github/workflows/hotfix.yml +name: Emergency Hotfix Workflow +on: + issues: + types: [labeled] + +jobs: + emergency-hotfix: + if: contains(github.event.issue.labels.*.name, 'critical-hotfix') + runs-on: ubuntu-latest + + steps: + - name: Create Hotfix Branch + run: | + LAST_STABLE=$(gh release list --limit 1 --json tagName -q '.[0].tagName') + HOTFIX_VERSION=$(echo $LAST_STABLE | awk -F. '{print $1"."$2"."$3+1}') + + git checkout -b hotfix/$HOTFIX_VERSION $LAST_STABLE + + - name: Fast-Track Testing + run: | + npm ci + npm run test:critical + npm run build + + - name: Emergency Release + run: | + npx Codex-flow@alpha github emergency-release \ + --issue ${{ github.event.issue.number }} \ + --severity critical \ + --fast-track \ + --notify-all +``` + +--- + +## Best Practices & Patterns + +### Release Planning Guidelines + +#### 1. Regular Release Cadence +- **Weekly**: Patch releases with bug fixes +- **Bi-weekly**: Minor releases with features +- **Quarterly**: Major releases with breaking changes +- **On-demand**: Hotfixes for critical issues + +#### 2. Feature Freeze Strategy +- Code freeze 3 days before release +- Only critical bug fixes allowed +- Beta testing period for major releases +- Stakeholder communication plan + +#### 3. Version Management Rules +- Strict semantic versioning compliance +- Breaking changes only in major versions +- Deprecation warnings one minor version ahead +- Cross-package version synchronization + +### Automation Recommendations + +#### 1. Comprehensive CI/CD Pipeline +- Automated testing at every stage +- Security scanning before release +- Performance benchmarking +- Documentation generation + +#### 2. Progressive Deployment +- Canary releases for early detection +- Staged rollouts with monitoring +- Automated health checks +- Quick rollback mechanisms + +#### 3. 
Monitoring & Observability +- Real-time error tracking +- Performance metrics collection +- User adoption analytics +- Feedback collection automation + +### Documentation Standards + +#### 1. Changelog Requirements +- Categorized changes by type +- Breaking changes highlighted +- Migration guides for major versions +- Contributor attribution + +#### 2. Release Notes Content +- High-level feature summaries +- Detailed technical changes +- Upgrade instructions +- Known issues and limitations + +#### 3. API Documentation +- Automated API doc generation +- Example code updates +- Deprecation notices +- Version compatibility matrix + +--- + +## Troubleshooting & Common Issues + +### Issue: Failed Release Build +```bash +# Debug build failures +npx Codex-flow@alpha diagnostic-run \ + --component build \ + --verbose + +# Retry with isolated environment +docker run --rm -v $(pwd):/app node:20 \ + bash -c "cd /app && npm ci && npm run build" +``` + +### Issue: Test Failures in CI +```bash +# Run tests with detailed output +npm run test -- --verbose --coverage + +# Check for environment-specific issues +npm run test:ci + +# Compare local vs CI environment +npx Codex-flow@alpha github compat-test \ + --environments "local,ci" \ + --compare +``` + +### Issue: Deployment Rollback Needed +```bash +# Immediate rollback to previous version +npx Codex-flow@alpha github rollback \ + --to-version v1.9.9 \ + --reason "Critical bug in v2.0.0" \ + --preserve-data \ + --notify-users + +# Investigate rollback cause +npx Codex-flow@alpha github release-analytics \ + --version v2.0.0 \ + --identify-issues +``` + +### Issue: Version Conflicts +```bash +# Check and resolve version conflicts +npx Codex-flow@alpha github release-validate \ + --checks version-conflicts \ + --auto-resolve + +# Align multi-package versions +npx Codex-flow@alpha github version-sync \ + --packages "package-a,package-b" \ + --strategy semantic +``` + +--- + +## Performance Metrics & Benchmarks + +### Expected 
Performance +- **Release Planning**: < 2 minutes +- **Build Process**: 3-8 minutes (varies by project) +- **Test Execution**: 5-15 minutes +- **Deployment**: 2-5 minutes per target +- **Complete Pipeline**: 15-30 minutes + +### Optimization Tips +1. **Parallel Execution**: Use swarm coordination for concurrent tasks +2. **Caching**: Enable build and dependency caching +3. **Incremental Builds**: Only rebuild changed components +4. **Test Optimization**: Run critical tests first, full suite in parallel + +### Success Metrics +- **Release Frequency**: Target weekly minor releases +- **Lead Time**: < 2 hours from commit to production +- **Failure Rate**: < 2% of releases require rollback +- **MTTR**: < 30 minutes for critical hotfixes + +--- + +## Related Resources + +### Documentation +- [GitHub CLI Documentation](https://cli.github.com/manual/) +- [Semantic Versioning Spec](https://semver.org/) +- [Codex Flow SPARC Guide](../../docs/sparc-methodology.md) +- [Swarm Coordination Patterns](../../docs/swarm-patterns.md) + +### Related Skills +- **github-pr-management**: PR review and merge automation +- **github-workflow-automation**: CI/CD workflow orchestration +- **multi-repo-coordination**: Cross-repository synchronization +- **deployment-orchestration**: Advanced deployment strategies + +### Support & Community +- Issues: https://github.com/ruvnet/Codex-flow/issues +- Discussions: https://github.com/ruvnet/Codex-flow/discussions +- Documentation: https://Codex-flow.dev/docs + +--- + +## Appendix: Release Checklist Template + +### Pre-Release Checklist +- [ ] Version numbers updated across all packages +- [ ] Changelog generated and reviewed +- [ ] Breaking changes documented with migration guide +- [ ] All tests passing (unit, integration, e2e) +- [ ] Security scan completed with no critical issues +- [ ] Performance benchmarks within acceptable range +- [ ] Documentation updated (API docs, README, examples) +- [ ] Release notes drafted and reviewed +- [ ] 
Stakeholders notified of upcoming release +- [ ] Deployment plan reviewed and approved + +### Release Checklist +- [ ] Release branch created and validated +- [ ] CI/CD pipeline completed successfully +- [ ] Artifacts built and verified +- [ ] GitHub release created with proper notes +- [ ] Packages published to registries +- [ ] Docker images pushed to container registry +- [ ] Deployment to staging successful +- [ ] Smoke tests passing in staging +- [ ] Production deployment completed +- [ ] Health checks passing + +### Post-Release Checklist +- [ ] Release announcement published +- [ ] Monitoring dashboards reviewed +- [ ] Error rates within normal range +- [ ] Performance metrics stable +- [ ] User feedback collected +- [ ] Documentation links verified +- [ ] Release retrospective scheduled +- [ ] Next release planning initiated + +--- + +**Version**: 2.0.0 +**Last Updated**: 2025-10-19 +**Maintained By**: Codex Flow Team diff --git a/.agents/skills/github-workflow-automation/SKILL.md b/.agents/skills/github-workflow-automation/SKILL.md new file mode 100644 index 0000000..f8e33c0 --- /dev/null +++ b/.agents/skills/github-workflow-automation/SKILL.md @@ -0,0 +1,1065 @@ +--- +name: github-workflow-automation +version: 1.0.0 +category: github +description: Advanced GitHub Actions workflow automation with AI swarm coordination, intelligent CI/CD pipelines, and comprehensive repository management +tags: + - github + - github-actions + - ci-cd + - workflow-automation + - swarm-coordination + - deployment + - security +authors: + - Codex-flow +requires: + - gh (GitHub CLI) + - git + - Codex-flow@alpha + - node (v16+) +priority: high +progressive_disclosure: true +--- + +# GitHub Workflow Automation Skill + +## Overview + +This skill provides comprehensive GitHub Actions automation with AI swarm coordination. It integrates intelligent CI/CD pipelines, workflow orchestration, and repository management to create self-organizing, adaptive GitHub workflows. 
+ +## Quick Start + +
+
+<details>
+<summary>💡 Basic Usage - Click to expand</summary>
+
+### Initialize GitHub Workflow Automation
+```bash
+# Start with a simple workflow
+npx ruv-swarm actions generate-workflow \
+  --analyze-codebase \
+  --detect-languages \
+  --create-optimal-pipeline
+```
+
+### Common Commands
+```bash
+# Optimize existing workflow
+npx ruv-swarm actions optimize \
+  --workflow ".github/workflows/ci.yml" \
+  --suggest-parallelization
+
+# Analyze failed runs
+gh run view --json jobs,conclusion | \
+  npx ruv-swarm actions analyze-failure \
+  --suggest-fixes
+```
+
+</details>
+ +## Core Capabilities + +### 🤖 Swarm-Powered GitHub Modes + +
+Available GitHub Integration Modes + +#### 1. gh-coordinator +**GitHub workflow orchestration and coordination** +- **Coordination Mode**: Hierarchical +- **Max Parallel Operations**: 10 +- **Batch Optimized**: Yes +- **Best For**: Complex GitHub workflows, multi-repo coordination + +```bash +# Usage example +npx Codex-flow@alpha github gh-coordinator \ + "Coordinate multi-repo release across 5 repositories" +``` + +#### 2. pr-manager +**Pull request management and review coordination** +- **Review Mode**: Automated +- **Multi-reviewer**: Yes +- **Conflict Resolution**: Intelligent + +```bash +# Create PR with automated review +gh pr create --title "Feature: New capability" \ + --body "Automated PR with swarm review" | \ + npx ruv-swarm actions pr-validate \ + --spawn-agents "linter,tester,security,docs" +``` + +#### 3. issue-tracker +**Issue management and project coordination** +- **Issue Workflow**: Automated +- **Label Management**: Smart +- **Progress Tracking**: Real-time + +```bash +# Create coordinated issue workflow +npx Codex-flow@alpha github issue-tracker \ + "Manage sprint issues with automated tracking" +``` + +#### 4. release-manager +**Release coordination and deployment** +- **Release Pipeline**: Automated +- **Versioning**: Semantic +- **Deployment**: Multi-stage + +```bash +# Automated release management +npx Codex-flow@alpha github release-manager \ + "Create v2.0.0 release with changelog and deployment" +``` + +#### 5. repo-architect +**Repository structure and organization** +- **Structure Optimization**: Yes +- **Multi-repo Support**: Yes +- **Template Management**: Advanced + +```bash +# Optimize repository structure +npx Codex-flow@alpha github repo-architect \ + "Restructure monorepo with optimal organization" +``` + +#### 6. 
code-reviewer +**Automated code review and quality assurance** +- **Review Quality**: Deep +- **Security Analysis**: Yes +- **Performance Check**: Automated + +```bash +# Automated code review +gh pr view 123 --json files | \ + npx ruv-swarm actions pr-validate \ + --deep-review \ + --security-scan +``` + +#### 7. ci-orchestrator +**CI/CD pipeline coordination** +- **Pipeline Management**: Advanced +- **Test Coordination**: Parallel +- **Deployment**: Automated + +```bash +# Orchestrate CI/CD pipeline +npx Codex-flow@alpha github ci-orchestrator \ + "Setup parallel test execution with smart caching" +``` + +#### 8. security-guardian +**Security and compliance management** +- **Security Scan**: Automated +- **Compliance Check**: Continuous +- **Vulnerability Management**: Proactive + +```bash +# Security audit +npx ruv-swarm actions security \ + --deep-scan \ + --compliance-check \ + --create-issues +``` + +
+ +### 🔧 Workflow Templates + +
+Production-Ready GitHub Actions Templates + +#### 1. Intelligent CI with Swarms +```yaml +# .github/workflows/swarm-ci.yml +name: Intelligent CI with Swarms +on: [push, pull_request] + +jobs: + swarm-analysis: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Initialize Swarm + uses: ruvnet/swarm-action@v1 + with: + topology: mesh + max-agents: 6 + + - name: Analyze Changes + run: | + npx ruv-swarm actions analyze \ + --commit ${{ github.sha }} \ + --suggest-tests \ + --optimize-pipeline +``` + +#### 2. Multi-Language Detection +```yaml +# .github/workflows/polyglot-swarm.yml +name: Polyglot Project Handler +on: push + +jobs: + detect-and-build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Detect Languages + id: detect + run: | + npx ruv-swarm actions detect-stack \ + --output json > stack.json + + - name: Dynamic Build Matrix + run: | + npx ruv-swarm actions create-matrix \ + --from stack.json \ + --parallel-builds +``` + +#### 3. Adaptive Security Scanning +```yaml +# .github/workflows/security-swarm.yml +name: Intelligent Security Scan +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +jobs: + security-swarm: + runs-on: ubuntu-latest + steps: + - name: Security Analysis Swarm + run: | + SECURITY_ISSUES=$(npx ruv-swarm actions security \ + --deep-scan \ + --format json) + + echo "$SECURITY_ISSUES" | jq -r '.issues[]? | @base64' | while read -r issue; do + _jq() { + echo ${issue} | base64 --decode | jq -r ${1} + } + gh issue create \ + --title "$(_jq '.title')" \ + --body "$(_jq '.body')" \ + --label "security,critical" + done +``` + +#### 4. 
Self-Healing Pipeline +```yaml +# .github/workflows/self-healing.yml +name: Self-Healing Pipeline +on: workflow_run + +jobs: + heal-pipeline: + if: ${{ github.event.workflow_run.conclusion == 'failure' }} + runs-on: ubuntu-latest + steps: + - name: Diagnose and Fix + run: | + npx ruv-swarm actions self-heal \ + --run-id ${{ github.event.workflow_run.id }} \ + --auto-fix-common \ + --create-pr-complex +``` + +#### 5. Progressive Deployment +```yaml +# .github/workflows/smart-deployment.yml +name: Smart Deployment +on: + push: + branches: [main] + +jobs: + progressive-deploy: + runs-on: ubuntu-latest + steps: + - name: Analyze Risk + id: risk + run: | + npx ruv-swarm actions deploy-risk \ + --changes ${{ github.sha }} \ + --history 30d + + - name: Choose Strategy + run: | + npx ruv-swarm actions deploy-strategy \ + --risk ${{ steps.risk.outputs.level }} \ + --auto-execute +``` + +#### 6. Performance Regression Detection +```yaml +# .github/workflows/performance-guard.yml +name: Performance Guard +on: pull_request + +jobs: + perf-swarm: + runs-on: ubuntu-latest + steps: + - name: Performance Analysis + run: | + npx ruv-swarm actions perf-test \ + --baseline main \ + --threshold 10% \ + --auto-profile-regression +``` + +#### 7. PR Validation Swarm +```yaml +# .github/workflows/pr-validation.yml +name: PR Validation Swarm +on: pull_request + +jobs: + validate: + runs-on: ubuntu-latest + steps: + - name: Multi-Agent Validation + run: | + PR_DATA=$(gh pr view ${{ github.event.pull_request.number }} --json files,labels) + + RESULTS=$(npx ruv-swarm actions pr-validate \ + --spawn-agents "linter,tester,security,docs" \ + --parallel \ + --pr-data "$PR_DATA") + + gh pr comment ${{ github.event.pull_request.number }} \ + --body "$RESULTS" +``` + +#### 8. 
Intelligent Release +```yaml +# .github/workflows/intelligent-release.yml +name: Intelligent Release +on: + push: + tags: ['v*'] + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Release Swarm + run: | + npx ruv-swarm actions release \ + --analyze-changes \ + --generate-notes \ + --create-artifacts \ + --publish-smart +``` + +
+ +### 📊 Monitoring & Analytics + +
+
+<details>
+<summary>Workflow Analysis & Optimization</summary>
+
+#### Workflow Analytics
+```bash
+# Analyze workflow performance
+npx ruv-swarm actions analytics \
+  --workflow "ci.yml" \
+  --period 30d \
+  --identify-bottlenecks \
+  --suggest-improvements
+```
+
+#### Cost Optimization
+```bash
+# Optimize GitHub Actions costs
+npx ruv-swarm actions cost-optimize \
+  --analyze-usage \
+  --suggest-caching \
+  --recommend-self-hosted
+```
+
+#### Failure Pattern Analysis
+```bash
+# Identify failure patterns
+npx ruv-swarm actions failure-patterns \
+  --period 90d \
+  --classify-failures \
+  --suggest-preventions
+```
+
+#### Resource Management
+```bash
+# Optimize resource usage
+npx ruv-swarm actions resources \
+  --analyze-usage \
+  --suggest-runners \
+  --cost-optimize
+```
+
+</details>
+ +## Advanced Features + +### 🧪 Dynamic Test Strategies + +
+Intelligent Test Selection & Execution + +#### Smart Test Selection +```yaml +# Automatically select relevant tests +- name: Swarm Test Selection + run: | + npx ruv-swarm actions smart-test \ + --changed-files ${{ steps.files.outputs.all }} \ + --impact-analysis \ + --parallel-safe +``` + +#### Dynamic Test Matrix +```yaml +# Generate test matrix from code analysis +jobs: + generate-matrix: + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - id: set-matrix + run: | + MATRIX=$(npx ruv-swarm actions test-matrix \ + --detect-frameworks \ + --optimize-coverage) + echo "matrix=${MATRIX}" >> $GITHUB_OUTPUT + + test: + needs: generate-matrix + strategy: + matrix: ${{fromJson(needs.generate-matrix.outputs.matrix)}} +``` + +#### Intelligent Parallelization +```bash +# Determine optimal parallelization +npx ruv-swarm actions parallel-strategy \ + --analyze-dependencies \ + --time-estimates \ + --cost-aware +``` + +
+ +### 🔮 Predictive Analysis + +
+AI-Powered Workflow Predictions + +#### Predictive Failures +```bash +# Predict potential failures +npx ruv-swarm actions predict \ + --analyze-history \ + --identify-risks \ + --suggest-preventive +``` + +#### Workflow Recommendations +```bash +# Get workflow recommendations +npx ruv-swarm actions recommend \ + --analyze-repo \ + --suggest-workflows \ + --industry-best-practices +``` + +#### Automated Optimization +```bash +# Continuously optimize workflows +npx ruv-swarm actions auto-optimize \ + --monitor-performance \ + --apply-improvements \ + --track-savings +``` + +
+ +### 🎯 Custom Actions Development + +
+
+<details>
+<summary>Build Your Own Swarm Actions</summary>
+
+#### Custom Swarm Action Template
+```yaml
+# action.yml
+name: 'Swarm Custom Action'
+description: 'Custom swarm-powered action'
+inputs:
+  task:
+    description: 'Task for swarm'
+    required: true
+runs:
+  using: 'node16'
+  main: 'dist/index.js'
+```
+
+```javascript
+// index.js
+const core = require('@actions/core');
+const { SwarmAction } = require('ruv-swarm');
+
+async function run() {
+  const swarm = new SwarmAction({
+    topology: 'mesh',
+    agents: ['analyzer', 'optimizer']
+  });
+
+  await swarm.execute(core.getInput('task'));
+}
+
+run().catch(error => core.setFailed(error.message));
+```
+
+</details>
+ +## Integration with Codex-Flow + +### 🔄 Swarm Coordination Patterns + +
+MCP-Based GitHub Workflow Coordination + +#### Initialize GitHub Swarm +```javascript +// Step 1: Initialize swarm coordination +mcp__claude-flow__swarm_init { + topology: "hierarchical", + maxAgents: 8 +} + +// Step 2: Spawn specialized agents +mcp__claude-flow__agent_spawn { type: "coordinator", name: "GitHub Coordinator" } +mcp__claude-flow__agent_spawn { type: "reviewer", name: "Code Reviewer" } +mcp__claude-flow__agent_spawn { type: "tester", name: "QA Agent" } +mcp__claude-flow__agent_spawn { type: "analyst", name: "Security Analyst" } + +// Step 3: Orchestrate GitHub workflow +mcp__claude-flow__task_orchestrate { + task: "Complete PR review and merge workflow", + strategy: "parallel", + priority: "high" +} +``` + +#### GitHub Hooks Integration +```bash +# Pre-task: Setup GitHub context +npx Codex-flow@alpha hooks pre-task \ + --description "PR review workflow" \ + --context "pr-123" + +# During task: Track progress +npx Codex-flow@alpha hooks notify \ + --message "Completed security scan" \ + --type "github-action" + +# Post-task: Export results +npx Codex-flow@alpha hooks post-task \ + --task-id "pr-review-123" \ + --export-github-summary +``` + +
+ +### 📦 Batch Operations + +
+Concurrent GitHub Operations + +#### Parallel GitHub CLI Commands +```javascript +// Single message with all GitHub operations +[Concurrent Execution]: + Bash("gh issue create --title 'Feature A' --body 'Description A' --label 'enhancement'") + Bash("gh issue create --title 'Feature B' --body 'Description B' --label 'enhancement'") + Bash("gh pr create --title 'PR 1' --head 'feature-a' --base 'main'") + Bash("gh pr create --title 'PR 2' --head 'feature-b' --base 'main'") + Bash("gh pr checks 123 --watch") + TodoWrite { todos: [ + {content: "Review security scan results", status: "pending"}, + {content: "Merge approved PRs", status: "pending"}, + {content: "Update changelog", status: "pending"} + ]} +``` + +
+ +## Best Practices + +### 🏗️ Workflow Organization + +
+Structure Your GitHub Workflows + +#### 1. Use Reusable Workflows +```yaml +# .github/workflows/reusable-swarm.yml +name: Reusable Swarm Workflow +on: + workflow_call: + inputs: + topology: + required: true + type: string + +jobs: + swarm-task: + runs-on: ubuntu-latest + steps: + - name: Initialize Swarm + run: | + npx ruv-swarm init --topology ${{ inputs.topology }} +``` + +#### 2. Implement Proper Caching +```yaml +- name: Cache Swarm Dependencies + uses: actions/cache@v3 + with: + path: ~/.npm + key: ${{ runner.os }}-swarm-${{ hashFiles('**/package-lock.json') }} +``` + +#### 3. Set Appropriate Timeouts +```yaml +jobs: + swarm-task: + timeout-minutes: 30 + steps: + - name: Swarm Operation + timeout-minutes: 10 +``` + +#### 4. Use Workflow Dependencies +```yaml +jobs: + setup: + runs-on: ubuntu-latest + + test: + needs: setup + runs-on: ubuntu-latest + + deploy: + needs: [setup, test] + runs-on: ubuntu-latest +``` + +
+ +### 🔒 Security Best Practices + +
+Secure Your GitHub Workflows + +#### 1. Store Configurations Securely +```yaml +- name: Setup Swarm + env: + SWARM_CONFIG: ${{ secrets.SWARM_CONFIG }} + API_KEY: ${{ secrets.API_KEY }} + run: | + npx ruv-swarm init --config "$SWARM_CONFIG" +``` + +#### 2. Use OIDC Authentication +```yaml +permissions: + id-token: write + contents: read + +- name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: arn:aws:iam::123456789012:role/GitHubAction + aws-region: us-east-1 +``` + +#### 3. Implement Least-Privilege +```yaml +permissions: + contents: read + pull-requests: write + issues: write +``` + +#### 4. Audit Swarm Operations +```yaml +- name: Audit Swarm Actions + run: | + npx ruv-swarm actions audit \ + --export-logs \ + --compliance-report +``` + +
+ +### ⚡ Performance Optimization + +
+Maximize Workflow Performance + +#### 1. Cache Swarm Dependencies +```yaml +- uses: actions/cache@v3 + with: + path: | + ~/.npm + node_modules + key: ${{ runner.os }}-swarm-${{ hashFiles('**/package-lock.json') }} +``` + +#### 2. Use Appropriate Runner Sizes +```yaml +jobs: + heavy-task: + runs-on: ubuntu-latest-4-cores + steps: + - name: Intensive Swarm Operation +``` + +#### 3. Implement Early Termination +```yaml +- name: Quick Fail Check + run: | + if ! npx ruv-swarm actions pre-check; then + echo "Pre-check failed, terminating early" + exit 1 + fi +``` + +#### 4. Optimize Parallel Execution +```yaml +strategy: + matrix: + include: + - runner: ubuntu-latest + task: test + - runner: ubuntu-latest + task: lint + - runner: ubuntu-latest + task: security + max-parallel: 3 +``` + +
+ +## Debugging & Troubleshooting + +### 🐛 Debug Tools + +
+Debug GitHub Workflow Issues + +#### Debug Mode +```yaml +- name: Debug Swarm + run: | + npx ruv-swarm actions debug \ + --verbose \ + --trace-agents \ + --export-logs + env: + ACTIONS_STEP_DEBUG: true +``` + +#### Performance Profiling +```bash +# Profile workflow performance +npx ruv-swarm actions profile \ + --workflow "ci.yml" \ + --identify-slow-steps \ + --suggest-optimizations +``` + +#### Failure Analysis +```bash +# Analyze failed runs +gh run view --json jobs,conclusion | \ + npx ruv-swarm actions analyze-failure \ + --suggest-fixes \ + --auto-retry-flaky +``` + +#### Log Analysis +```bash +# Download and analyze logs +gh run download +npx ruv-swarm actions analyze-logs \ + --directory ./logs \ + --identify-errors +``` + +
+ +## Real-World Examples + +### 🚀 Complete Workflows + +
+Production-Ready Integration Examples + +#### Example 1: Full-Stack Application CI/CD +```yaml +name: Full-Stack CI/CD with Swarms +on: + push: + branches: [main, develop] + pull_request: + +jobs: + initialize: + runs-on: ubuntu-latest + outputs: + swarm-id: ${{ steps.init.outputs.swarm-id }} + steps: + - id: init + run: | + SWARM_ID=$(npx ruv-swarm init --topology mesh --output json | jq -r '.id') + echo "swarm-id=${SWARM_ID}" >> $GITHUB_OUTPUT + + backend: + needs: initialize + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Backend Tests + run: | + npx ruv-swarm agents spawn --type tester \ + --task "Run backend test suite" \ + --swarm-id ${{ needs.initialize.outputs.swarm-id }} + + frontend: + needs: initialize + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Frontend Tests + run: | + npx ruv-swarm agents spawn --type tester \ + --task "Run frontend test suite" \ + --swarm-id ${{ needs.initialize.outputs.swarm-id }} + + security: + needs: initialize + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Security Scan + run: | + npx ruv-swarm agents spawn --type security \ + --task "Security audit" \ + --swarm-id ${{ needs.initialize.outputs.swarm-id }} + + deploy: + needs: [backend, frontend, security] + if: github.ref == 'refs/heads/main' + runs-on: ubuntu-latest + steps: + - name: Deploy + run: | + npx ruv-swarm actions deploy \ + --strategy progressive \ + --swarm-id ${{ needs.initialize.outputs.swarm-id }} +``` + +#### Example 2: Monorepo Management +```yaml +name: Monorepo Coordination +on: push + +jobs: + detect-changes: + runs-on: ubuntu-latest + outputs: + packages: ${{ steps.detect.outputs.packages }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - id: detect + run: | + PACKAGES=$(npx ruv-swarm actions detect-changes \ + --monorepo \ + --output json) + echo "packages=${PACKAGES}" >> $GITHUB_OUTPUT + + build-packages: + needs: detect-changes + runs-on: 
ubuntu-latest + strategy: + matrix: + package: ${{ fromJson(needs.detect-changes.outputs.packages) }} + steps: + - name: Build Package + run: | + npx ruv-swarm actions build \ + --package ${{ matrix.package }} \ + --parallel-deps +``` + +#### Example 3: Multi-Repo Synchronization +```bash +# Synchronize multiple repositories +npx Codex-flow@alpha github sync-coordinator \ + "Synchronize version updates across: + - github.com/org/repo-a + - github.com/org/repo-b + - github.com/org/repo-c + + Update dependencies, align versions, create PRs" +``` + +
+ +## Command Reference + +### 📚 Quick Command Guide + +
+All Available Commands + +#### Workflow Generation +```bash +npx ruv-swarm actions generate-workflow [options] + --analyze-codebase Analyze repository structure + --detect-languages Detect programming languages + --create-optimal-pipeline Generate optimized workflow +``` + +#### Optimization +```bash +npx ruv-swarm actions optimize [options] + --workflow Path to workflow file + --suggest-parallelization Suggest parallel execution + --reduce-redundancy Remove redundant steps + --estimate-savings Estimate time/cost savings +``` + +#### Analysis +```bash +npx ruv-swarm actions analyze [options] + --commit Analyze specific commit + --suggest-tests Suggest test improvements + --optimize-pipeline Optimize pipeline structure +``` + +#### Testing +```bash +npx ruv-swarm actions smart-test [options] + --changed-files Files that changed + --impact-analysis Analyze test impact + --parallel-safe Only parallel-safe tests +``` + +#### Security +```bash +npx ruv-swarm actions security [options] + --deep-scan Deep security analysis + --format Output format (json/text) + --create-issues Auto-create GitHub issues +``` + +#### Deployment +```bash +npx ruv-swarm actions deploy [options] + --strategy Deployment strategy + --risk Risk assessment level + --auto-execute Execute automatically +``` + +#### Monitoring +```bash +npx ruv-swarm actions analytics [options] + --workflow Workflow to analyze + --period Analysis period + --identify-bottlenecks Find bottlenecks + --suggest-improvements Improvement suggestions +``` + +
+ +## Integration Checklist + +### ✅ Setup Verification + +
+Verify Your Setup + +- [ ] GitHub CLI (`gh`) installed and authenticated +- [ ] Git configured with user credentials +- [ ] Node.js v16+ installed +- [ ] `Codex-flow@alpha` package available +- [ ] Repository has `.github/workflows` directory +- [ ] GitHub Actions enabled on repository +- [ ] Necessary secrets configured +- [ ] Runner permissions verified + +#### Quick Setup Script +```bash +#!/bin/bash +# setup-github-automation.sh + +# Install dependencies +npm install -g Codex-flow@alpha + +# Verify GitHub CLI +gh auth status || gh auth login + +# Create workflow directory +mkdir -p .github/workflows + +# Generate initial workflow +npx ruv-swarm actions generate-workflow \ + --analyze-codebase \ + --create-optimal-pipeline > .github/workflows/ci.yml + +echo "✅ GitHub workflow automation setup complete" +``` + +
+ +## Related Skills + +- `github-pr-enhancement` - Advanced PR management +- `release-coordination` - Release automation +- `swarm-coordination` - Multi-agent orchestration +- `ci-cd-optimization` - Pipeline optimization + +## Support & Documentation + +- **GitHub CLI Docs**: https://cli.github.com/manual/ +- **GitHub Actions**: https://docs.github.com/en/actions +- **Codex-Flow**: https://github.com/ruvnet/Codex-flow +- **Ruv-Swarm**: https://github.com/ruvnet/ruv-swarm + +## Version History + +- **v1.0.0** (2025-01-19): Initial skill consolidation + - Merged workflow-automation.md (441 lines) + - Merged github-modes.md (146 lines) + - Added progressive disclosure + - Enhanced with swarm coordination patterns + - Added comprehensive examples and best practices + +--- + +**Skill Status**: ✅ Production Ready +**Last Updated**: 2025-01-19 +**Maintainer**: Codex-flow team diff --git a/.agents/skills/hooks-automation/SKILL.md b/.agents/skills/hooks-automation/SKILL.md new file mode 100644 index 0000000..4ee9309 --- /dev/null +++ b/.agents/skills/hooks-automation/SKILL.md @@ -0,0 +1,1201 @@ +--- +name: Hooks Automation +description: Automated coordination, formatting, and learning from Codex operations using intelligent hooks with MCP integration. Includes pre/post task hooks, session management, Git integration, memory coordination, and neural pattern training for enhanced development workflows. +--- + +# Hooks Automation + +Intelligent automation system that coordinates, validates, and learns from Codex operations through hooks integrated with MCP tools and neural pattern training. + +## What This Skill Does + +This skill provides a comprehensive hook system that automatically manages development operations, coordinates swarm agents, maintains session state, and continuously learns from coding patterns. It enables automated agent assignment, code formatting, performance tracking, and cross-session memory persistence. 
+ +**Key Capabilities:** +- **Pre-Operation Hooks**: Validate, prepare, and auto-assign agents before operations +- **Post-Operation Hooks**: Format, analyze, and train patterns after operations +- **Session Management**: Persist state, restore context, generate summaries +- **Memory Coordination**: Synchronize knowledge across swarm agents +- **Git Integration**: Automated commit hooks with quality verification +- **Neural Training**: Continuous learning from successful patterns +- **MCP Integration**: Seamless coordination with swarm tools + +## Prerequisites + +**Required:** +- Codex Flow CLI installed (`npm install -g Codex-flow@alpha`) +- Codex with hooks enabled +- `.Codex/settings.json` with hook configurations + +**Optional:** +- MCP servers configured (Codex-flow, ruv-swarm, flow-nexus) +- Git repository for version control +- Testing framework for quality verification + +## Quick Start + +### Initialize Hooks System + +```bash +# Initialize with default hooks configuration +npx Codex-flow init --hooks +``` + +This creates: +- `.Codex/settings.json` with pre-configured hooks +- Hook command documentation in `.Codex/commands/hooks/` +- Default hook handlers for common operations + +### Basic Hook Usage + +```bash +# Pre-task hook (auto-spawns agents) +npx Codex-flow hook pre-task --description "Implement authentication" + +# Post-edit hook (auto-formats and stores in memory) +npx Codex-flow hook post-edit --file "src/auth.js" --memory-key "auth/login" + +# Session end hook (saves state and metrics) +npx Codex-flow hook session-end --session-id "dev-session" --export-metrics +``` + +--- + +## Complete Guide + +### Available Hooks + +#### Pre-Operation Hooks + +Hooks that execute BEFORE operations to prepare and validate: + +**pre-edit** - Validate and assign agents before file modifications +```bash +npx Codex-flow hook pre-edit [options] + +Options: + --file, -f File path to be edited + --auto-assign-agent Automatically assign best agent (default: true) + 
--validate-syntax Pre-validate syntax before edit + --check-conflicts Check for merge conflicts + --backup-file Create backup before editing + +Examples: + npx Codex-flow hook pre-edit --file "src/auth/login.js" + npx Codex-flow hook pre-edit -f "config/db.js" --validate-syntax + npx Codex-flow hook pre-edit -f "production.env" --backup-file --check-conflicts +``` + +**Features:** +- Auto agent assignment based on file type +- Syntax validation to prevent broken code +- Conflict detection for concurrent edits +- Automatic file backups for safety + +**pre-bash** - Check command safety and resource requirements +```bash +npx Codex-flow hook pre-bash --command + +Options: + --command, -c Command to validate + --check-safety Verify command safety (default: true) + --estimate-resources Estimate resource usage + --require-confirmation Request user confirmation for risky commands + +Examples: + npx Codex-flow hook pre-bash -c "rm -rf /tmp/cache" + npx Codex-flow hook pre-bash --command "docker build ." 
--estimate-resources +``` + +**Features:** +- Command safety validation +- Resource requirement estimation +- Destructive command confirmation +- Permission checks + +**pre-task** - Auto-spawn agents and prepare for complex tasks +```bash +npx Codex-flow hook pre-task [options] + +Options: + --description, -d Task description for context + --auto-spawn-agents Automatically spawn required agents (default: true) + --load-memory Load relevant memory from previous sessions + --optimize-topology Select optimal swarm topology + --estimate-complexity Analyze task complexity + +Examples: + npx Codex-flow hook pre-task --description "Implement user authentication" + npx Codex-flow hook pre-task -d "Continue API dev" --load-memory + npx Codex-flow hook pre-task -d "Refactor codebase" --optimize-topology +``` + +**Features:** +- Automatic agent spawning based on task analysis +- Memory loading for context continuity +- Topology optimization for task structure +- Complexity estimation and time prediction + +**pre-search** - Prepare and optimize search operations +```bash +npx Codex-flow hook pre-search --query + +Options: + --query, -q Search query + --check-cache Check cache first (default: true) + --optimize-query Optimize search pattern + +Examples: + npx Codex-flow hook pre-search -q "authentication middleware" +``` + +**Features:** +- Cache checking for faster results +- Query optimization +- Search pattern improvement + +#### Post-Operation Hooks + +Hooks that execute AFTER operations to process and learn: + +**post-edit** - Auto-format, validate, and update memory +```bash +npx Codex-flow hook post-edit [options] + +Options: + --file, -f File path that was edited + --auto-format Automatically format code (default: true) + --memory-key, -m Store edit context in memory + --train-patterns Train neural patterns from edit + --validate-output Validate edited file + +Examples: + npx Codex-flow hook post-edit --file "src/components/Button.jsx" + npx Codex-flow hook post-edit -f 
"api/auth.js" --memory-key "auth/login" + npx Codex-flow hook post-edit -f "utils/helpers.ts" --train-patterns +``` + +**Features:** +- Language-specific auto-formatting (Prettier, Black, gofmt) +- Memory storage for edit context and decisions +- Neural pattern training for continuous improvement +- Output validation with linting + +**post-bash** - Log execution and update metrics +```bash +npx Codex-flow hook post-bash --command + +Options: + --command, -c Command that was executed + --log-output Log command output (default: true) + --update-metrics Update performance metrics + --store-result Store result in memory + +Examples: + npx Codex-flow hook post-bash -c "npm test" --update-metrics +``` + +**Features:** +- Command execution logging +- Performance metric tracking +- Result storage for analysis +- Error pattern detection + +**post-task** - Performance analysis and decision storage +```bash +npx Codex-flow hook post-task [options] + +Options: + --task-id, -t Task identifier for tracking + --analyze-performance Generate performance metrics (default: true) + --store-decisions Save task decisions to memory + --export-learnings Export neural pattern learnings + --generate-report Create task completion report + +Examples: + npx Codex-flow hook post-task --task-id "auth-implementation" + npx Codex-flow hook post-task -t "api-refactor" --analyze-performance + npx Codex-flow hook post-task -t "bug-fix-123" --store-decisions +``` + +**Features:** +- Execution time and token usage measurement +- Decision and implementation choice recording +- Neural learning pattern export +- Completion report generation + +**post-search** - Cache results and improve patterns +```bash +npx Codex-flow hook post-search --query --results + +Options: + --query, -q Original search query + --results, -r Results file path + --cache-results Cache for future use (default: true) + --train-patterns Improve search patterns + +Examples: + npx Codex-flow hook post-search -q "auth" -r "results.json" 
--train-patterns +``` + +**Features:** +- Result caching for faster subsequent searches +- Search pattern improvement +- Relevance scoring + +#### MCP Integration Hooks + +Hooks that coordinate with MCP swarm tools: + +**mcp-initialized** - Persist swarm configuration +```bash +npx Codex-flow hook mcp-initialized --swarm-id + +Features: +- Save swarm topology and configuration +- Store agent roster in memory +- Initialize coordination namespace +``` + +**agent-spawned** - Update agent roster and memory +```bash +npx Codex-flow hook agent-spawned --agent-id --type + +Features: +- Register agent in coordination memory +- Update agent roster +- Initialize agent-specific memory namespace +``` + +**task-orchestrated** - Monitor task progress +```bash +npx Codex-flow hook task-orchestrated --task-id + +Features: +- Track task progress through memory +- Monitor agent assignments +- Update coordination state +``` + +**neural-trained** - Save pattern improvements +```bash +npx Codex-flow hook neural-trained --pattern + +Features: +- Export trained neural patterns +- Update coordination models +- Share learning across agents +``` + +#### Memory Coordination Hooks + +**memory-write** - Triggered when agents write to coordination memory +```bash +Features: +- Validate memory key format +- Update cross-agent indexes +- Trigger dependent hooks +- Notify subscribed agents +``` + +**memory-read** - Triggered when agents read from coordination memory +```bash +Features: +- Log access patterns +- Update popularity metrics +- Preload related data +- Track usage statistics +``` + +**memory-sync** - Synchronize memory across swarm agents +```bash +npx Codex-flow hook memory-sync --namespace + +Features: +- Sync memory state across agents +- Resolve conflicts +- Propagate updates +- Maintain consistency +``` + +#### Session Hooks + +**session-start** - Initialize new session +```bash +npx Codex-flow hook session-start --session-id + +Options: + --session-id, -s Session identifier + 
--load-context Load context from previous session + --init-agents Initialize required agents + +Features: +- Create session directory +- Initialize metrics tracking +- Load previous context +- Set up coordination namespace +``` + +**session-restore** - Load previous session state +```bash +npx Codex-flow hook session-restore --session-id + +Options: + --session-id, -s Session to restore + --restore-memory Restore memory state (default: true) + --restore-agents Restore agent configurations + +Examples: + npx Codex-flow hook session-restore --session-id "swarm-20241019" + npx Codex-flow hook session-restore -s "feature-auth" --restore-memory +``` + +**Features:** +- Load previous session context +- Restore memory state and decisions +- Reconfigure agents to previous state +- Resume in-progress tasks + +**session-end** - Cleanup and persist session state +```bash +npx Codex-flow hook session-end [options] + +Options: + --session-id, -s Session identifier to end + --save-state Save current session state (default: true) + --export-metrics Export session metrics + --generate-summary Create session summary + --cleanup-temp Remove temporary files + +Examples: + npx Codex-flow hook session-end --session-id "dev-session-2024" + npx Codex-flow hook session-end -s "feature-auth" --export-metrics --generate-summary + npx Codex-flow hook session-end -s "quick-fix" --cleanup-temp +``` + +**Features:** +- Save current context and progress +- Export session metrics (duration, commands, tokens, files) +- Generate work summary with decisions and next steps +- Cleanup temporary files and optimize storage + +**notify** - Custom notifications with swarm status +```bash +npx Codex-flow hook notify --message + +Options: + --message, -m Notification message + --level Notification level (info|warning|error) + --swarm-status Include swarm status (default: true) + --broadcast Send to all agents + +Examples: + npx Codex-flow hook notify -m "Task completed" --level info + npx Codex-flow hook 
notify -m "Critical error" --level error --broadcast +``` + +**Features:** +- Send notifications to coordination system +- Include swarm status and metrics +- Broadcast to all agents +- Log important events + +### Configuration + +#### Basic Configuration + +Edit `.Codex/settings.json` to configure hooks: + +```json +{ + "hooks": { + "PreToolUse": [ + { + "matcher": "^(Write|Edit|MultiEdit)$", + "hooks": [{ + "type": "command", + "command": "npx Codex-flow hook pre-edit --file '${tool.params.file_path}' --memory-key 'swarm/editor/current'" + }] + }, + { + "matcher": "^Bash$", + "hooks": [{ + "type": "command", + "command": "npx Codex-flow hook pre-bash --command '${tool.params.command}'" + }] + } + ], + "PostToolUse": [ + { + "matcher": "^(Write|Edit|MultiEdit)$", + "hooks": [{ + "type": "command", + "command": "npx Codex-flow hook post-edit --file '${tool.params.file_path}' --memory-key 'swarm/editor/complete' --auto-format --train-patterns" + }] + }, + { + "matcher": "^Bash$", + "hooks": [{ + "type": "command", + "command": "npx Codex-flow hook post-bash --command '${tool.params.command}' --update-metrics" + }] + } + ] + } +} +``` + +#### Advanced Configuration + +Complete hook configuration with all features: + +```json +{ + "hooks": { + "enabled": true, + "debug": false, + "timeout": 5000, + + "PreToolUse": [ + { + "matcher": "^(Write|Edit|MultiEdit)$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook pre-edit --file '${tool.params.file_path}' --auto-assign-agent --validate-syntax", + "timeout": 3000, + "continueOnError": true + } + ] + }, + { + "matcher": "^Task$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook pre-task --description '${tool.params.task}' --auto-spawn-agents --load-memory", + "async": true + } + ] + }, + { + "matcher": "^Grep$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook pre-search --query '${tool.params.pattern}' --check-cache" + } + ] + } + ], + + "PostToolUse": [ + 
{ + "matcher": "^(Write|Edit|MultiEdit)$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook post-edit --file '${tool.params.file_path}' --memory-key 'edits/${tool.params.file_path}' --auto-format --train-patterns", + "async": true + } + ] + }, + { + "matcher": "^Task$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook post-task --task-id '${result.task_id}' --analyze-performance --store-decisions --export-learnings", + "async": true + } + ] + }, + { + "matcher": "^Grep$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook post-search --query '${tool.params.pattern}' --cache-results --train-patterns" + } + ] + } + ], + + "SessionStart": [ + { + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook session-start --session-id '${session.id}' --load-context" + } + ] + } + ], + + "SessionEnd": [ + { + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook session-end --session-id '${session.id}' --export-metrics --generate-summary --cleanup-temp" + } + ] + } + ] + } +} +``` + +#### Protected File Patterns + +Add protection for sensitive files: + +```json +{ + "hooks": { + "PreToolUse": [ + { + "matcher": "^(Write|Edit|MultiEdit)$", + "hooks": [ + { + "type": "command", + "command": "npx Codex-flow hook check-protected --file '${tool.params.file_path}'" + } + ] + } + ] + } +} +``` + +#### Automatic Testing + +Run tests after file modifications: + +```json +{ + "hooks": { + "PostToolUse": [ + { + "matcher": "^Write$", + "hooks": [ + { + "type": "command", + "command": "test -f '${tool.params.file_path%.js}.test.js' && npm test '${tool.params.file_path%.js}.test.js'", + "continueOnError": true + } + ] + } + ] + } +} +``` + +### MCP Tool Integration + +Hooks automatically integrate with MCP tools for coordination: + +#### Pre-Task Hook with Agent Spawning + +```javascript +// Hook command +npx Codex-flow hook pre-task --description "Build REST API" + +// Internally calls MCP 
tools: +mcp__claude-flow__agent_spawn { + type: "backend-dev", + capabilities: ["api", "database", "testing"] +} + +mcp__claude-flow__memory_usage { + action: "store", + key: "swarm/task/api-build/context", + namespace: "coordination", + value: JSON.stringify({ + description: "Build REST API", + agents: ["backend-dev"], + started: Date.now() + }) +} +``` + +#### Post-Edit Hook with Memory Storage + +```javascript +// Hook command +npx Codex-flow hook post-edit --file "api/auth.js" + +// Internally calls MCP tools: +mcp__claude-flow__memory_usage { + action: "store", + key: "swarm/edits/api/auth.js", + namespace: "coordination", + value: JSON.stringify({ + file: "api/auth.js", + timestamp: Date.now(), + changes: { added: 45, removed: 12 }, + formatted: true, + linted: true + }) +} + +mcp__claude-flow__neural_train { + pattern_type: "coordination", + training_data: { /* edit patterns */ } +} +``` + +#### Session End Hook with State Persistence + +```javascript +// Hook command +npx Codex-flow hook session-end --session-id "dev-2024" + +// Internally calls MCP tools: +mcp__claude-flow__memory_persist { + sessionId: "dev-2024" +} + +mcp__claude-flow__swarm_status { + swarmId: "current" +} + +// Generates metrics and summary +``` + +### Memory Coordination Protocol + +All hooks follow a standardized memory coordination pattern: + +#### Three-Phase Memory Protocol + +**Phase 1: STATUS** - Hook starts +```javascript +mcp__claude-flow__memory_usage { + action: "store", + key: "swarm/hooks/pre-edit/status", + namespace: "coordination", + value: JSON.stringify({ + status: "running", + hook: "pre-edit", + file: "src/auth.js", + timestamp: Date.now() + }) +} +``` + +**Phase 2: PROGRESS** - Hook processes +```javascript +mcp__claude-flow__memory_usage { + action: "store", + key: "swarm/hooks/pre-edit/progress", + namespace: "coordination", + value: JSON.stringify({ + progress: 50, + action: "validating syntax", + file: "src/auth.js" + }) +} +``` + +**Phase 3: COMPLETE** - Hook 
finishes +```javascript +mcp__claude-flow__memory_usage { + action: "store", + key: "swarm/hooks/pre-edit/complete", + namespace: "coordination", + value: JSON.stringify({ + status: "complete", + result: "success", + agent_assigned: "backend-dev", + syntax_valid: true, + backup_created: true + }) +} +``` + +### Hook Response Format + +Hooks return JSON responses to control operation flow: + +#### Continue Response +```json +{ + "continue": true, + "reason": "All validations passed", + "metadata": { + "agent_assigned": "backend-dev", + "syntax_valid": true, + "file": "src/auth.js" + } +} +``` + +#### Block Response +```json +{ + "continue": false, + "reason": "Protected file - manual review required", + "metadata": { + "file": ".env.production", + "protection_level": "high", + "requires": "manual_approval" + } +} +``` + +#### Warning Response +```json +{ + "continue": true, + "reason": "Syntax valid but complexity high", + "warnings": [ + "Cyclomatic complexity: 15 (threshold: 10)", + "Consider refactoring for better maintainability" + ], + "metadata": { + "complexity": 15, + "threshold": 10 + } +} +``` + +### Git Integration + +Hooks can integrate with Git operations for quality control: + +#### Pre-Commit Hook +```bash +# Add to .git/hooks/pre-commit or use husky + +#!/bin/bash +# Run quality checks before commit + +# Get staged files +FILES=$(git diff --cached --name-only --diff-filter=ACM) + +for FILE in $FILES; do + # Run pre-edit hook for validation + npx Codex-flow hook pre-edit --file "$FILE" --validate-syntax + + if [ $? -ne 0 ]; then + echo "Validation failed for $FILE" + exit 1 + fi + + # Run post-edit hook for formatting + npx Codex-flow hook post-edit --file "$FILE" --auto-format +done + +# Run tests +npm test + +exit $? 
+``` + +#### Post-Commit Hook +```bash +# Add to .git/hooks/post-commit + +#!/bin/bash +# Track commit metrics + +COMMIT_HASH=$(git rev-parse HEAD) +COMMIT_MSG=$(git log -1 --pretty=%B) + +npx Codex-flow hook notify \ + --message "Commit completed: $COMMIT_MSG" \ + --level info \ + --swarm-status +``` + +#### Pre-Push Hook +```bash +# Add to .git/hooks/pre-push + +#!/bin/bash +# Quality gate before push + +# Run full test suite +npm run test:all + +# Run quality checks +npx Codex-flow hook session-end \ + --generate-report \ + --export-metrics + +# Verify quality thresholds +TRUTH_SCORE=$(npx Codex-flow metrics score --format json | jq -r '.truth_score') + +if (( $(echo "$TRUTH_SCORE < 0.95" | bc -l) )); then + echo "Truth score below threshold: $TRUTH_SCORE < 0.95" + exit 1 +fi + +exit 0 +``` + +### Agent Coordination Workflow + +How agents use hooks for coordination: + +#### Agent Workflow Example + +```bash +# Agent 1: Backend Developer +# STEP 1: Pre-task preparation +npx Codex-flow hook pre-task \ + --description "Implement user authentication API" \ + --auto-spawn-agents \ + --load-memory + +# STEP 2: Work begins - pre-edit validation +npx Codex-flow hook pre-edit \ + --file "api/auth.js" \ + --auto-assign-agent \ + --validate-syntax + +# STEP 3: Edit file (via Codex Edit tool) +# ... code changes ... 
+ +# STEP 4: Post-edit processing +npx Codex-flow hook post-edit \ + --file "api/auth.js" \ + --memory-key "swarm/backend/auth-api" \ + --auto-format \ + --train-patterns + +# STEP 5: Notify coordination system +npx Codex-flow hook notify \ + --message "Auth API implementation complete" \ + --swarm-status \ + --broadcast + +# STEP 6: Task completion +npx Codex-flow hook post-task \ + --task-id "auth-api" \ + --analyze-performance \ + --store-decisions \ + --export-learnings +``` + +```bash +# Agent 2: Test Engineer (receives notification) +# STEP 1: Check memory for API details +npx Codex-flow hook session-restore \ + --session-id "swarm-current" \ + --restore-memory + +# Memory contains: swarm/backend/auth-api with implementation details + +# STEP 2: Generate tests +npx Codex-flow hook pre-task \ + --description "Write tests for auth API" \ + --load-memory + +# STEP 3: Create test file +npx Codex-flow hook post-edit \ + --file "api/auth.test.js" \ + --memory-key "swarm/testing/auth-api-tests" \ + --train-patterns + +# STEP 4: Share test results +npx Codex-flow hook notify \ + --message "Auth API tests complete - 100% coverage" \ + --broadcast +``` + +### Custom Hook Creation + +Create custom hooks for specific workflows: + +#### Custom Hook Template + +```javascript +// .Codex/hooks/custom-quality-check.js + +module.exports = { + name: 'custom-quality-check', + type: 'pre', + matcher: /\.(ts|js)$/, + + async execute(context) { + const { file, content } = context; + + // Custom validation logic + const complexity = await analyzeComplexity(content); + const securityIssues = await scanSecurity(content); + + // Store in memory + await storeInMemory({ + key: `quality/${file}`, + value: { complexity, securityIssues } + }); + + // Return decision + if (complexity > 15 || securityIssues.length > 0) { + return { + continue: false, + reason: 'Quality checks failed', + warnings: [ + `Complexity: ${complexity} (max: 15)`, + `Security issues: ${securityIssues.length}` + ] + }; 
+ } + + return { + continue: true, + reason: 'Quality checks passed', + metadata: { complexity, securityIssues: 0 } + }; + } +}; +``` + +#### Register Custom Hook + +```json +{ + "hooks": { + "PreToolUse": [ + { + "matcher": "^(Write|Edit)$", + "hooks": [ + { + "type": "script", + "script": ".Codex/hooks/custom-quality-check.js" + } + ] + } + ] + } +} +``` + +### Real-World Examples + +#### Example 1: Full-Stack Development Workflow + +```bash +# Session start - initialize coordination +npx Codex-flow hook session-start --session-id "fullstack-feature" + +# Pre-task planning +npx Codex-flow hook pre-task \ + --description "Build user profile feature - frontend + backend + tests" \ + --auto-spawn-agents \ + --optimize-topology + +# Backend work +npx Codex-flow hook pre-edit --file "api/profile.js" +# ... implement backend ... +npx Codex-flow hook post-edit \ + --file "api/profile.js" \ + --memory-key "profile/backend" \ + --train-patterns + +# Frontend work (reads backend details from memory) +npx Codex-flow hook pre-edit --file "components/Profile.jsx" +# ... implement frontend ... +npx Codex-flow hook post-edit \ + --file "components/Profile.jsx" \ + --memory-key "profile/frontend" \ + --train-patterns + +# Testing (reads both backend and frontend from memory) +npx Codex-flow hook pre-task \ + --description "Test profile feature" \ + --load-memory + +# Session end - export everything +npx Codex-flow hook session-end \ + --session-id "fullstack-feature" \ + --export-metrics \ + --generate-summary +``` + +#### Example 2: Debugging with Hooks + +```bash +# Start debugging session +npx Codex-flow hook session-start --session-id "debug-memory-leak" + +# Pre-task: analyze issue +npx Codex-flow hook pre-task \ + --description "Debug memory leak in event handlers" \ + --load-memory \ + --estimate-complexity + +# Search for event emitters +npx Codex-flow hook pre-search --query "EventEmitter" +# ... search executes ... 
+npx Codex-flow hook post-search \ + --query "EventEmitter" \ + --cache-results + +# Fix the issue +npx Codex-flow hook pre-edit \ + --file "services/events.js" \ + --backup-file +# ... fix code ... +npx Codex-flow hook post-edit \ + --file "services/events.js" \ + --memory-key "debug/memory-leak-fix" \ + --validate-output + +# Verify fix +npx Codex-flow hook post-task \ + --task-id "memory-leak-fix" \ + --analyze-performance \ + --generate-report + +# End session +npx Codex-flow hook session-end \ + --session-id "debug-memory-leak" \ + --export-metrics +``` + +#### Example 3: Multi-Agent Refactoring + +```bash +# Initialize swarm for refactoring +npx Codex-flow hook pre-task \ + --description "Refactor legacy codebase to modern patterns" \ + --auto-spawn-agents \ + --optimize-topology + +# Agent 1: Code Analyzer +npx Codex-flow hook pre-task --description "Analyze code complexity" +# ... analysis ... +npx Codex-flow hook post-task \ + --task-id "analysis" \ + --store-decisions + +# Agent 2: Refactoring (reads analysis from memory) +npx Codex-flow hook session-restore \ + --session-id "swarm-refactor" \ + --restore-memory + +for file in src/**/*.js; do + npx Codex-flow hook pre-edit --file "$file" --backup-file + # ... refactor ... + npx Codex-flow hook post-edit \ + --file "$file" \ + --memory-key "refactor/$file" \ + --auto-format \ + --train-patterns +done + +# Agent 3: Testing (reads refactored code from memory) +npx Codex-flow hook pre-task \ + --description "Generate tests for refactored code" \ + --load-memory + +# Broadcast completion +npx Codex-flow hook notify \ + --message "Refactoring complete - all tests passing" \ + --broadcast +``` + +### Performance Tips + +1. **Keep Hooks Lightweight** - Target < 100ms execution time +2. **Use Async for Heavy Operations** - Don't block the main flow +3. **Cache Aggressively** - Store frequently accessed data +4. **Batch Related Operations** - Combine multiple actions +5. 
**Use Memory Wisely** - Set appropriate TTLs +6. **Monitor Hook Performance** - Track execution times +7. **Parallelize When Possible** - Run independent hooks concurrently + +### Debugging Hooks + +Enable debug mode for troubleshooting: + +```bash +# Enable debug output +export CLAUDE_FLOW_DEBUG=true + +# Test specific hook with verbose output +npx Codex-flow hook pre-edit --file "test.js" --debug + +# Check hook execution logs +cat .Codex-flow/logs/hooks-$(date +%Y-%m-%d).log + +# Validate configuration +npx Codex-flow hook validate-config +``` + +### Benefits + +- **Automatic Agent Assignment**: Right agent for every file type +- **Consistent Code Formatting**: Language-specific formatters +- **Continuous Learning**: Neural patterns improve over time +- **Cross-Session Memory**: Context persists between sessions +- **Performance Tracking**: Comprehensive metrics and analytics +- **Automatic Coordination**: Agents sync via memory +- **Smart Agent Spawning**: Task-based agent selection +- **Quality Gates**: Pre-commit validation and verification +- **Error Prevention**: Syntax validation before edits +- **Knowledge Sharing**: Decisions stored and shared +- **Reduced Manual Work**: Automation of repetitive tasks +- **Better Collaboration**: Seamless multi-agent coordination + +### Best Practices + +1. **Configure Hooks Early** - Set up during project initialization +2. **Use Memory Keys Strategically** - Organize with clear namespaces +3. **Enable Auto-Formatting** - Maintain code consistency +4. **Train Patterns Continuously** - Learn from successful operations +5. **Monitor Performance** - Track hook execution times +6. **Validate Configuration** - Test hooks before production use +7. **Document Custom Hooks** - Maintain hook documentation +8. **Set Appropriate Timeouts** - Prevent hanging operations +9. **Handle Errors Gracefully** - Use continueOnError when appropriate +10. 
**Review Metrics Regularly** - Optimize based on usage patterns + +### Troubleshooting + +#### Hooks Not Executing +- Verify `.Codex/settings.json` syntax +- Check hook matcher patterns +- Enable debug mode +- Review permission settings +- Ensure Codex-flow CLI is in PATH + +#### Hook Timeouts +- Increase timeout values in configuration +- Make hooks asynchronous for heavy operations +- Optimize hook logic +- Check network connectivity for MCP tools + +#### Memory Issues +- Set appropriate TTLs for memory keys +- Clean up old memory entries +- Use memory namespaces effectively +- Monitor memory usage + +#### Performance Problems +- Profile hook execution times +- Use caching for repeated operations +- Batch operations when possible +- Reduce hook complexity + +### Related Commands + +- `npx Codex-flow init --hooks` - Initialize hooks system +- `npx Codex-flow hook --list` - List available hooks +- `npx Codex-flow hook --test ` - Test specific hook +- `npx Codex-flow memory usage` - Manage memory +- `npx Codex-flow agent spawn` - Spawn agents +- `npx Codex-flow swarm init` - Initialize swarm + +### Integration with Other Skills + +This skill works seamlessly with: +- **SPARC Methodology** - Hooks enhance SPARC workflows +- **Pair Programming** - Automated quality in pairing sessions +- **Verification Quality** - Truth-score validation in hooks +- **GitHub Workflows** - Git integration for commits/PRs +- **Performance Analysis** - Metrics collection in hooks +- **Swarm Advanced** - Multi-agent coordination via hooks diff --git a/.agents/skills/pair-programming/SKILL.md b/.agents/skills/pair-programming/SKILL.md new file mode 100644 index 0000000..a76e93c --- /dev/null +++ b/.agents/skills/pair-programming/SKILL.md @@ -0,0 +1,1202 @@ +--- +name: Pair Programming +description: AI-assisted pair programming with multiple modes (driver/navigator/switch), real-time verification, quality monitoring, and comprehensive testing. 
Supports TDD, debugging, refactoring, and learning sessions. Features automatic role switching, continuous code review, security scanning, and performance optimization with truth-score verification. +--- + +# Pair Programming + +Collaborative AI pair programming with intelligent role management, real-time quality monitoring, and comprehensive development workflows. + +## What This Skill Does + +This skill provides professional pair programming capabilities with AI assistance, supporting multiple collaboration modes, continuous verification, and integrated testing. It manages driver/navigator roles, performs real-time code review, tracks quality metrics, and ensures high standards through truth-score verification. + +**Key Capabilities:** +- **Multiple Modes**: Driver, Navigator, Switch, TDD, Review, Mentor, Debug +- **Real-Time Verification**: Automatic quality scoring with rollback on failures +- **Role Management**: Seamless switching between driver/navigator roles +- **Testing Integration**: Auto-generate tests, track coverage, continuous testing +- **Code Review**: Security scanning, performance analysis, best practice enforcement +- **Session Persistence**: Auto-save, recovery, export, and sharing + +## Prerequisites + +**Required:** +- Codex Flow CLI installed (`npm install -g Codex-flow@alpha`) +- Git repository (optional but recommended) + +**Recommended:** +- Testing framework (Jest, pytest, etc.) +- Linter configured (ESLint, pylint, etc.) +- Code formatter (Prettier, Black, etc.) 
+ +## Quick Start + +### Basic Session +```bash +# Start simple pair programming +Codex-flow pair --start +``` + +### TDD Session +```bash +# Test-driven development +Codex-flow pair --start \ + --mode tdd \ + --test-first \ + --coverage 90 +``` + +--- + +## Complete Guide + +### Session Control Commands + +#### Starting Sessions +```bash +# Basic start +Codex-flow pair --start + +# Expert refactoring session +Codex-flow pair --start \ + --agent senior-dev \ + --focus refactor \ + --verify \ + --threshold 0.98 + +# Debugging session +Codex-flow pair --start \ + --agent debugger-expert \ + --focus debug \ + --review + +# Learning session +Codex-flow pair --start \ + --mode mentor \ + --pace slow \ + --examples +``` + +#### Session Management +```bash +# Check status +Codex-flow pair --status + +# View history +Codex-flow pair --history + +# Pause session +/pause [--reason ] + +# Resume session +/resume + +# End session +Codex-flow pair --end [--save] [--report] +``` + +### Available Modes + +#### Driver Mode +You write code while AI provides guidance. + +```bash +Codex-flow pair --start --mode driver +``` + +**Your Responsibilities:** +- Write actual code +- Implement solutions +- Make immediate decisions +- Handle syntax and structure + +**AI Navigator:** +- Strategic guidance +- Spot potential issues +- Suggest improvements +- Real-time review +- Track overall direction + +**Best For:** +- Learning new patterns +- Implementing familiar features +- Quick iterations +- Hands-on debugging + +**Commands:** +``` +/suggest - Get implementation suggestions +/review - Request code review +/explain - Ask for explanations +/optimize - Request optimization ideas +/patterns - Get pattern recommendations +``` + +#### Navigator Mode +AI writes code while you provide direction. 
+ +```bash +Codex-flow pair --start --mode navigator +``` + +**Your Responsibilities:** +- Provide high-level direction +- Review generated code +- Make architectural decisions +- Ensure business requirements + +**AI Driver:** +- Write implementation code +- Handle syntax details +- Implement your guidance +- Manage boilerplate +- Execute refactoring + +**Best For:** +- Rapid prototyping +- Boilerplate generation +- Learning from AI patterns +- Exploring solutions + +**Commands:** +``` +/implement - Direct implementation +/refactor - Request refactoring +/test - Generate tests +/document - Add documentation +/alternate - See alternative approaches +``` + +#### Switch Mode +Automatically alternates roles at intervals. + +```bash +# Default 10-minute intervals +Codex-flow pair --start --mode switch + +# 5-minute intervals (rapid) +Codex-flow pair --start --mode switch --interval 5m + +# 15-minute intervals (deep focus) +Codex-flow pair --start --mode switch --interval 15m +``` + +**Handoff Process:** +1. 30-second warning before switch +2. Current driver completes thought +3. Context summary generated +4. Roles swap smoothly +5. 
New driver continues + +**Best For:** +- Balanced collaboration +- Knowledge sharing +- Complex features +- Extended sessions + +#### Specialized Modes + +**TDD Mode** - Test-Driven Development: +```bash +Codex-flow pair --start \ + --mode tdd \ + --test-first \ + --coverage 100 +``` +Workflow: Write failing test → Implement → Refactor → Repeat + +**Review Mode** - Continuous code review: +```bash +Codex-flow pair --start \ + --mode review \ + --strict \ + --security +``` +Features: Real-time feedback, security scanning, performance analysis + +**Mentor Mode** - Learning-focused: +```bash +Codex-flow pair --start \ + --mode mentor \ + --explain-all \ + --pace slow +``` +Features: Detailed explanations, step-by-step guidance, pattern teaching + +**Debug Mode** - Problem-solving: +```bash +Codex-flow pair --start \ + --mode debug \ + --verbose \ + --trace +``` +Features: Issue identification, root cause analysis, fix suggestions + +### In-Session Commands + +#### Code Commands +``` +/explain [--level basic|detailed|expert] + Explain the current code or selection + +/suggest [--type refactor|optimize|security|style] + Get improvement suggestions + +/implement + Request implementation (navigator mode) + +/refactor [--pattern ] [--scope function|file|module] + Refactor selected code + +/optimize [--target speed|memory|both] + Optimize code for performance + +/document [--format jsdoc|markdown|inline] + Add documentation to code + +/comment [--verbose] + Add inline comments + +/pattern [--example] + Apply a design pattern +``` + +#### Testing Commands +``` +/test [--watch] [--coverage] [--only ] + Run test suite + +/test-gen [--type unit|integration|e2e] + Generate tests for current code + +/coverage [--report html|json|terminal] + Check test coverage + +/mock [--realistic] + Generate mock data or functions + +/test-watch [--on-save] + Enable test watching + +/snapshot [--update] + Create test snapshots +``` + +#### Review Commands +``` +/review [--scope 
current|file|changes] [--strict]
+  Perform code review
+
+/security [--deep] [--fix]
+  Security analysis
+
+/perf [--profile] [--suggestions]
+  Performance analysis
+
+/quality [--detailed]
+  Check code quality metrics
+
+/lint [--fix] [--config <path>]
+  Run linters
+
+/complexity [--threshold <n>]
+  Analyze code complexity
+```
+
+#### Navigation Commands
+```
+/goto <file>[:line[:column]]
+  Navigate to file or location
+
+/find <query> [--regex] [--case-sensitive]
+  Search in project
+
+/recent [--limit <n>]
+  Show recent files
+
+/bookmark [add|list|goto|remove] [<name>]
+  Manage bookmarks
+
+/history [--limit <n>] [--filter <pattern>]
+  Show command history
+
+/tree [--depth <n>] [--filter <pattern>]
+  Show project structure
+```
+
+#### Git Commands
+```
+/diff [--staged] [--file <path>]
+  Show git diff
+
+/commit [--message <text>] [--amend]
+  Commit with verification
+
+/branch [create|switch|delete|list] [<name>]
+  Branch operations
+
+/stash [save|pop|list|apply] [<name>]
+  Stash operations
+
+/log [--oneline] [--limit <n>]
+  View git log
+
+/blame [<file>]
+  Show git blame
+```
+
+#### AI Partner Commands
+```
+/agent [switch|info|config] [<name>]
+  Manage AI agent
+
+/teach <preference>
+  Teach the AI your preferences
+
+/feedback [positive|negative] <comment>
+  Provide feedback to AI
+
+/personality [professional|friendly|concise|verbose]
+  Adjust AI personality
+
+/expertise [add|remove|list] [<domain>]
+  Set AI expertise focus
+```
+
+#### Metrics Commands
+```
+/metrics [--period today|session|week|all]
+  Show session metrics
+
+/score [--breakdown]
+  Show quality scores
+
+/productivity [--chart]
+  Show productivity metrics
+
+/leaderboard [--personal|team]
+  Show improvement leaderboard
+```
+
+#### Role & Mode Commands
+```
+/switch [--immediate]
+  Switch driver/navigator roles
+
+/mode <mode>
+  Change mode (driver|navigator|switch|tdd|review|mentor|debug)
+
+/role
+  Show current role
+
+/handoff
+  Prepare role handoff
+```
+
+### Command Shortcuts
+
+| Alias | Full Command |
+|-------|-------------|
+| `/s` | `/suggest` |
+| `/e` | `/explain` |
+| `/t` | `/test` |
+| `/r` |
`/review` | +| `/c` | `/commit` | +| `/g` | `/goto` | +| `/f` | `/find` | +| `/h` | `/help` | +| `/sw` | `/switch` | +| `/st` | `/status` | + +### Configuration + +#### Basic Configuration +Create `.Codex-flow/pair-config.json`: + +```json +{ + "pair": { + "enabled": true, + "defaultMode": "switch", + "defaultAgent": "auto", + "autoStart": false, + "theme": "professional" + } +} +``` + +#### Complete Configuration + +```json +{ + "pair": { + "general": { + "enabled": true, + "defaultMode": "switch", + "defaultAgent": "senior-dev", + "language": "javascript", + "timezone": "UTC" + }, + + "modes": { + "driver": { + "enabled": true, + "suggestions": true, + "realTimeReview": true, + "autoComplete": false + }, + "navigator": { + "enabled": true, + "codeGeneration": true, + "explanations": true, + "alternatives": true + }, + "switch": { + "enabled": true, + "interval": "10m", + "warning": "30s", + "autoSwitch": true, + "pauseOnIdle": true + } + }, + + "verification": { + "enabled": true, + "threshold": 0.95, + "autoRollback": true, + "preCommitCheck": true, + "continuousMonitoring": true, + "blockOnFailure": true + }, + + "testing": { + "enabled": true, + "autoRun": true, + "framework": "jest", + "onSave": true, + "coverage": { + "enabled": true, + "minimum": 80, + "enforce": true, + "reportFormat": "html" + } + }, + + "review": { + "enabled": true, + "continuous": true, + "preCommit": true, + "security": true, + "performance": true, + "style": true, + "complexity": { + "maxComplexity": 10, + "maxDepth": 4, + "maxLines": 100 + } + }, + + "git": { + "enabled": true, + "autoCommit": false, + "commitTemplate": "feat: {message}", + "signCommits": false, + "pushOnEnd": false, + "branchProtection": true + }, + + "session": { + "autoSave": true, + "saveInterval": "5m", + "maxDuration": "4h", + "idleTimeout": "15m", + "breakReminder": "45m", + "metricsInterval": "1m" + }, + + "ai": { + "model": "advanced", + "temperature": 0.7, + "maxTokens": 4000, + "personality": 
"professional", + "expertise": ["backend", "testing", "security"], + "learningEnabled": true + } + } +} +``` + +#### Built-in Agents + +```json +{ + "agents": { + "senior-dev": { + "expertise": ["architecture", "patterns", "optimization"], + "style": "thorough", + "reviewLevel": "strict" + }, + "tdd-specialist": { + "expertise": ["testing", "mocks", "coverage"], + "style": "test-first", + "reviewLevel": "comprehensive" + }, + "debugger-expert": { + "expertise": ["debugging", "profiling", "tracing"], + "style": "analytical", + "reviewLevel": "focused" + }, + "junior-dev": { + "expertise": ["learning", "basics", "documentation"], + "style": "questioning", + "reviewLevel": "educational" + } + } +} +``` + +#### CLI Configuration +```bash +# Set configuration +Codex-flow pair config set defaultMode switch +Codex-flow pair config set verification.threshold 0.98 + +# Get configuration +Codex-flow pair config get +Codex-flow pair config get defaultMode + +# Export/Import +Codex-flow pair config export > config.json +Codex-flow pair config import config.json + +# Reset +Codex-flow pair config reset +``` + +#### Profile Management + +Create reusable profiles: + +```bash +# Create profile +Codex-flow pair profile create refactoring \ + --mode driver \ + --verify true \ + --threshold 0.98 \ + --focus refactor + +# Use profile +Codex-flow pair --start --profile refactoring + +# List profiles +Codex-flow pair profile list +``` + +Profile configuration: +```json +{ + "profiles": { + "refactoring": { + "mode": "driver", + "verification": { + "enabled": true, + "threshold": 0.98 + }, + "focus": "refactor" + }, + "debugging": { + "mode": "navigator", + "agent": "debugger-expert", + "trace": true, + "verbose": true + }, + "learning": { + "mode": "mentor", + "pace": "slow", + "explanations": "detailed", + "examples": true + } + } +} +``` + +### Real-World Examples + +#### Example 1: Feature Implementation + +Implementing user authentication with JWT tokens: + +```bash +# Session setup 
+Codex-flow pair --start \ + --mode switch \ + --agent senior-dev \ + --focus implement \ + --verify \ + --test +``` + +**Session Flow:** +``` +👥 Starting pair programming for authentication feature... + +[DRIVER: You - 10 minutes] +/explain JWT authentication flow +> AI explains JWT concepts and best practices + +/suggest implementation approach +> AI suggests using middleware pattern with refresh tokens + +# You write the basic auth middleware structure + +[SWITCH TO NAVIGATOR] + +[NAVIGATOR: AI - 10 minutes] +/implement JWT token generation with refresh tokens +> AI generates secure token implementation + +/test-gen +> AI creates comprehensive test suite + +[SWITCH TO DRIVER] + +[DRIVER: You - 10 minutes] +# You refine the implementation +/review --security +> AI performs security review, suggests improvements + +/commit --message "feat: JWT authentication with refresh tokens" +✅ Truth Score: 0.98 - Committed successfully +``` + +#### Example 2: Bug Fixing + +Debugging a memory leak in Node.js: + +```bash +# Session setup +Codex-flow pair --start \ + --mode navigator \ + --agent debugger-expert \ + --focus debug \ + --trace +``` + +**Session Flow:** +``` +👥 Starting debugging session... + +/status +> Analyzing application for memory issues... 
+ +/perf --profile +> Memory usage growing: 150MB → 450MB over 10 minutes + +/find "new EventEmitter" --regex +> Found 3 instances of EventEmitter creation + +/inspect eventEmitters --deep +> Discovering listeners not being removed + +/suggest fix for memory leak +> AI suggests: "Add removeListener in cleanup functions" + +/implement cleanup functions for all event emitters +> AI generates proper cleanup code + +/test +> Memory stable at 150MB ✅ + +/commit --message "fix: memory leak in event emitters" +``` + +#### Example 3: TDD Session + +Building shopping cart with test-driven development: + +```bash +# Session setup +Codex-flow pair --start \ + --mode tdd \ + --agent tdd-specialist \ + --test-first +``` + +**Session Flow:** +``` +👥 TDD Session: Shopping Cart Feature + +[RED PHASE] +/test-gen "add item to cart" +> AI writes failing test: + ✗ should add item to cart + ✗ should update quantity for existing item + ✗ should calculate total price + +[GREEN PHASE] +/implement minimal cart functionality +> You write just enough code to pass tests + +/test +> Tests passing: 3/3 ✅ + +[REFACTOR PHASE] +/refactor --pattern repository +> AI refactors to repository pattern + +/test +> Tests still passing: 3/3 ✅ + +[NEXT CYCLE] +/test-gen "remove item from cart" +> AI writes new failing tests... +``` + +#### Example 4: Code Refactoring + +Modernizing legacy code: + +```bash +# Session setup +Codex-flow pair --start \ + --mode driver \ + --focus refactor \ + --verify \ + --threshold 0.98 +``` + +**Session Flow:** +``` +👥 Refactoring Session: Modernizing UserService + +/analyze UserService.js +> AI identifies: + - Callback hell (5 levels deep) + - No error handling + - Tight coupling + - No tests + +/suggest refactoring plan +> AI suggests: + 1. Convert callbacks to async/await + 2. Add error boundaries + 3. Extract dependencies + 4. 
Add unit tests + +/test-gen --before-refactor +> AI generates tests for current behavior + +/refactor callbacks to async/await +# You refactor with AI guidance + +/test +> All tests passing ✅ + +/review --compare +> AI shows before/after comparison +> Code complexity: 35 → 12 +> Truth score: 0.99 ✅ + +/commit --message "refactor: modernize UserService with async/await" +``` + +#### Example 5: Performance Optimization + +Optimizing slow React application: + +```bash +# Session setup +Codex-flow pair --start \ + --mode switch \ + --agent performance-expert \ + --focus optimize \ + --profile +``` + +**Session Flow:** +``` +👥 Performance Optimization Session + +/perf --profile +> React DevTools Profiler Results: + - ProductList: 450ms render + - CartSummary: 200ms render + - Unnecessary re-renders: 15 + +/suggest optimizations for ProductList +> AI suggests: + 1. Add React.memo + 2. Use useMemo for expensive calculations + 3. Implement virtualization for long lists + +/implement React.memo and useMemo +# You implement with AI guidance + +/perf --profile +> ProductList: 45ms render (90% improvement!) ✅ + +/implement virtualization with react-window +> AI implements virtual scrolling + +/perf --profile +> ProductList: 12ms render (97% improvement!) 
✅ +> FPS: 60 stable ✅ + +/commit --message "perf: optimize ProductList with memoization and virtualization" +``` + +#### Example 6: API Development + +Building RESTful API with Express: + +```bash +# Session setup +Codex-flow pair --start \ + --mode navigator \ + --agent backend-expert \ + --focus implement \ + --test +``` + +**Session Flow:** +``` +👥 API Development Session + +/design REST API for blog platform +> AI designs endpoints: + POST /api/posts + GET /api/posts + GET /api/posts/:id + PUT /api/posts/:id + DELETE /api/posts/:id + +/implement CRUD endpoints with validation +> AI implements with Express + Joi validation + +/test-gen --integration +> AI generates integration tests + +/security --api +> AI adds: + - Rate limiting + - Input sanitization + - JWT authentication + - CORS configuration + +/document --openapi +> AI generates OpenAPI documentation + +/test --integration +> All endpoints tested: 15/15 ✅ +``` + +### Session Templates + +#### Quick Start Templates + +```bash +# Refactoring template +Codex-flow pair --template refactor +# Focus: Code improvement +# Verification: High (0.98) +# Testing: After each change +# Review: Continuous + +# Feature template +Codex-flow pair --template feature +# Focus: Implementation +# Verification: Standard (0.95) +# Testing: On completion +# Review: Pre-commit + +# Debug template +Codex-flow pair --template debug +# Focus: Problem solving +# Verification: Moderate (0.90) +# Testing: Regression tests +# Review: Root cause + +# Learning template +Codex-flow pair --template learn +# Mode: Mentor +# Pace: Slow +# Explanations: Detailed +# Examples: Many +``` + +### Session Management + +#### Session Status + +```bash +Codex-flow pair --status +``` + +**Output:** +``` +👥 Pair Programming Session +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Session ID: pair_1755021234567 +Duration: 45 minutes +Status: Active + +Partner: senior-dev +Current Role: DRIVER (you) +Mode: Switch (10m intervals) +Next Switch: in 3 minutes + +📊 
Metrics:
├── Truth Score: 0.982 ✅
├── Lines Changed: 234
├── Files Modified: 5
├── Tests Added: 12
├── Coverage: 87% ↑3%
└── Commits: 3

🎯 Focus: Implementation
📝 Current File: src/auth/login.js
```

#### Session History

```bash
Codex-flow pair --history
```

**Output:**
```
📚 Session History
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

1. 2024-01-15 14:30 - 16:45 (2h 15m)
   Partner: expert-coder
   Focus: Refactoring
   Truth Score: 0.975
   Changes: +340 -125 lines

2. 2024-01-14 10:00 - 11:30 (1h 30m)
   Partner: tdd-specialist
   Focus: Testing
   Truth Score: 0.991
   Tests Added: 24

3. 2024-01-13 15:00 - 17:00 (2h)
   Partner: debugger-expert
   Focus: Bug Fixing
   Truth Score: 0.968
   Issues Fixed: 5
```

#### Session Persistence

```bash
# Save session
Codex-flow pair --save [--name <name>]

# Load session
Codex-flow pair --load <session-id>

# Export session
Codex-flow pair --export <file> [--format json|md]

# Generate report
Codex-flow pair --report
```

#### Background Sessions

```bash
# Start in background
Codex-flow pair --start --background

# Monitor background session
Codex-flow pair --monitor

# Attach to background session
Codex-flow pair --attach

# End background session
Codex-flow pair --end
```

### Advanced Features

#### Custom Commands

Define in configuration:

```json
{
  "customCommands": {
    "tdd": "/test-gen && /test --watch",
    "full-review": "/lint --fix && /test && /review --strict",
    "quick-fix": "/suggest --type fix && /implement && /test"
  }
}
```

Use custom commands:
```
/custom tdd
/custom full-review
```

#### Command Chaining

```
/test && /commit && /push
/lint --fix && /test && /review --strict
```

#### Session Recording

```bash
# Start with recording
Codex-flow pair --start --record

# Replay session
Codex-flow pair --replay <recording>

# Session analytics
Codex-flow pair --analytics
```

#### Integration Options

**With Git:**
```bash
Codex-flow pair --start 
--git --auto-commit +``` + +**With CI/CD:** +```bash +Codex-flow pair --start --ci --non-interactive +``` + +**With IDE:** +```bash +Codex-flow pair --start --ide vscode +``` + +### Best Practices + +#### Session Practices +1. **Clear Goals** - Define session objectives upfront +2. **Appropriate Mode** - Choose based on task type +3. **Enable Verification** - For critical code paths +4. **Regular Testing** - Maintain quality continuously +5. **Session Notes** - Document important decisions +6. **Regular Breaks** - Take breaks every 45-60 minutes + +#### Code Practices +1. **Test Early** - Run tests after each change +2. **Verify Before Commit** - Check truth scores +3. **Review Security** - Always for sensitive code +4. **Profile Performance** - Use `/perf` for optimization +5. **Save Sessions** - For complex work +6. **Learn from AI** - Ask questions frequently + +#### Mode Selection +- **Driver Mode**: When learning, controlling implementation +- **Navigator Mode**: For rapid prototyping, generation +- **Switch Mode**: Long sessions, balanced collaboration +- **TDD Mode**: Building with tests +- **Review Mode**: Quality focus +- **Mentor Mode**: Learning priority +- **Debug Mode**: Fixing issues + +### Troubleshooting + +#### Session Won't Start +- Check agent availability +- Verify configuration file syntax +- Ensure clean workspace +- Review log files + +#### Session Disconnected +- Use `--recover` to restore +- Check network connection +- Verify background processes +- Review auto-save files + +#### Poor Performance +- Reduce verification threshold +- Disable continuous testing +- Check system resources +- Use lighter AI model + +#### Configuration Issues +- Validate JSON syntax +- Check file permissions +- Review priority order (CLI > env > project > user > global) +- Run `Codex-flow pair config validate` + +### Quality Metrics + +#### Truth Score Thresholds +``` +Error: < 0.90 ❌ +Warning: 0.90 - 0.95 ⚠️ +Good: 0.95 - 0.98 ✅ +Excellent: > 0.98 🌟 +``` + +#### 
Coverage Thresholds
```
Error: < 70% ❌
Warning: 70% - 80% ⚠️
Good: 80% - 90% ✅
Excellent: > 90% 🌟
```

#### Complexity Thresholds
```
Error: > 15 ❌
Warning: 10 - 15 ⚠️
Good: 5 - 10 ✅
Excellent: < 5 🌟
```

### Environment Variables

Override configuration via environment:

```bash
export CLAUDE_PAIR_MODE=driver
export CLAUDE_PAIR_VERIFY=true
export CLAUDE_PAIR_THRESHOLD=0.98
export CLAUDE_PAIR_AGENT=senior-dev
export CLAUDE_PAIR_AUTO_TEST=true
```

### Command History

Navigate history:
- `↑/↓` - Navigate through command history
- `Ctrl+R` - Search command history
- `!!` - Repeat last command
- `!<n>` - Run command n from history

### Keyboard Shortcuts (Configurable)

Default shortcuts:
```json
{
  "shortcuts": {
    "switch": "ctrl+shift+s",
    "suggest": "ctrl+space",
    "review": "ctrl+r",
    "test": "ctrl+t"
  }
}
```

### Related Commands

- `Codex-flow pair --help` - Show help
- `Codex-flow pair config` - Manage configuration
- `Codex-flow pair profile` - Manage profiles
- `Codex-flow pair templates` - List templates
- `Codex-flow pair agents` - List available agents
diff --git a/.agents/skills/reasoningbank-agentdb/SKILL.md b/.agents/skills/reasoningbank-agentdb/SKILL.md
new file mode 100644
index 0000000..08add6f
--- /dev/null
+++ b/.agents/skills/reasoningbank-agentdb/SKILL.md
@@ -0,0 +1,446 @@
+---
+name: "ReasoningBank with AgentDB"
+description: "Implement ReasoningBank adaptive learning with AgentDB's 150x faster vector database. Includes trajectory tracking, verdict judgment, memory distillation, and pattern recognition. Use when building self-learning agents, optimizing decision-making, or implementing experience replay systems."
+---
+
+# ReasoningBank with AgentDB
+
+## What This Skill Does
+
+Provides ReasoningBank adaptive learning patterns using AgentDB's high-performance backend (150x-12,500x faster). 
Enables agents to learn from experiences, judge outcomes, distill memories, and improve decision-making over time with 100% backward compatibility. + +**Performance**: 150x faster pattern retrieval, 500x faster batch operations, <1ms memory access. + +## Prerequisites + +- Node.js 18+ +- AgentDB v1.0.7+ (via agentic-flow) +- Understanding of reinforcement learning concepts (optional) + +--- + +## Quick Start with CLI + +### Initialize ReasoningBank Database + +```bash +# Initialize AgentDB for ReasoningBank +npx agentdb@latest init ./.agentdb/reasoningbank.db --dimension 1536 + +# Start MCP server for Codex integration +npx agentdb@latest mcp +Codex mcp add agentdb npx agentdb@latest mcp +``` + +### Migrate from Legacy ReasoningBank + +```bash +# Automatic migration with validation +npx agentdb@latest migrate --source .swarm/memory.db + +# Verify migration +npx agentdb@latest stats ./.agentdb/reasoningbank.db +``` + +--- + +## Quick Start with API + +```typescript +import { createAgentDBAdapter, computeEmbedding } from 'agentic-flow/reasoningbank'; + +// Initialize ReasoningBank with AgentDB +const rb = await createAgentDBAdapter({ + dbPath: '.agentdb/reasoningbank.db', + enableLearning: true, // Enable learning plugins + enableReasoning: true, // Enable reasoning agents + cacheSize: 1000, // 1000 pattern cache +}); + +// Store successful experience +const query = "How to optimize database queries?"; +const embedding = await computeEmbedding(query); + +await rb.insertPattern({ + id: '', + type: 'experience', + domain: 'database-optimization', + pattern_data: JSON.stringify({ + embedding, + pattern: { + query, + approach: 'indexing + query optimization', + outcome: 'success', + metrics: { latency_reduction: 0.85 } + } + }), + confidence: 0.95, + usage_count: 1, + success_count: 1, + created_at: Date.now(), + last_used: Date.now(), +}); + +// Retrieve similar experiences with reasoning +const result = await rb.retrieveWithReasoning(embedding, { + domain: 
'database-optimization', + k: 5, + useMMR: true, // Diverse results + synthesizeContext: true, // Rich context synthesis +}); + +console.log('Memories:', result.memories); +console.log('Context:', result.context); +console.log('Patterns:', result.patterns); +``` + +--- + +## Core ReasoningBank Concepts + +### 1. Trajectory Tracking + +Track agent execution paths and outcomes: + +```typescript +// Record trajectory (sequence of actions) +const trajectory = { + task: 'optimize-api-endpoint', + steps: [ + { action: 'analyze-bottleneck', result: 'found N+1 query' }, + { action: 'add-eager-loading', result: 'reduced queries' }, + { action: 'add-caching', result: 'improved latency' } + ], + outcome: 'success', + metrics: { latency_before: 2500, latency_after: 150 } +}; + +const embedding = await computeEmbedding(JSON.stringify(trajectory)); + +await rb.insertPattern({ + id: '', + type: 'trajectory', + domain: 'api-optimization', + pattern_data: JSON.stringify({ embedding, pattern: trajectory }), + confidence: 0.9, + usage_count: 1, + success_count: 1, + created_at: Date.now(), + last_used: Date.now(), +}); +``` + +### 2. Verdict Judgment + +Judge whether a trajectory was successful: + +```typescript +// Retrieve similar past trajectories +const similar = await rb.retrieveWithReasoning(queryEmbedding, { + domain: 'api-optimization', + k: 10, +}); + +// Judge based on similarity to successful patterns +const verdict = similar.memories.filter(m => + m.pattern.outcome === 'success' && + m.similarity > 0.8 +).length > 5 ? 'likely_success' : 'needs_review'; + +console.log('Verdict:', verdict); +console.log('Confidence:', similar.memories[0]?.similarity || 0); +``` + +### 3. 
Memory Distillation + +Consolidate similar experiences into patterns: + +```typescript +// Get all experiences in domain +const experiences = await rb.retrieveWithReasoning(embedding, { + domain: 'api-optimization', + k: 100, + optimizeMemory: true, // Automatic consolidation +}); + +// Distill into high-level pattern +const distilledPattern = { + domain: 'api-optimization', + pattern: 'For N+1 queries: add eager loading, then cache', + success_rate: 0.92, + sample_size: experiences.memories.length, + confidence: 0.95 +}; + +await rb.insertPattern({ + id: '', + type: 'distilled-pattern', + domain: 'api-optimization', + pattern_data: JSON.stringify({ + embedding: await computeEmbedding(JSON.stringify(distilledPattern)), + pattern: distilledPattern + }), + confidence: 0.95, + usage_count: 0, + success_count: 0, + created_at: Date.now(), + last_used: Date.now(), +}); +``` + +--- + +## Integration with Reasoning Agents + +AgentDB provides 4 reasoning modules that enhance ReasoningBank: + +### 1. PatternMatcher + +Find similar successful patterns: + +```typescript +const result = await rb.retrieveWithReasoning(queryEmbedding, { + domain: 'problem-solving', + k: 10, + useMMR: true, // Maximal Marginal Relevance for diversity +}); + +// PatternMatcher returns diverse, relevant memories +result.memories.forEach(mem => { + console.log(`Pattern: ${mem.pattern.approach}`); + console.log(`Similarity: ${mem.similarity}`); + console.log(`Success Rate: ${mem.success_count / mem.usage_count}`); +}); +``` + +### 2. 
ContextSynthesizer + +Generate rich context from multiple memories: + +```typescript +const result = await rb.retrieveWithReasoning(queryEmbedding, { + domain: 'code-optimization', + synthesizeContext: true, // Enable context synthesis + k: 5, +}); + +// ContextSynthesizer creates coherent narrative +console.log('Synthesized Context:', result.context); +// "Based on 5 similar optimizations, the most effective approach +// involves profiling, identifying bottlenecks, and applying targeted +// improvements. Success rate: 87%" +``` + +### 3. MemoryOptimizer + +Automatically consolidate and prune: + +```typescript +const result = await rb.retrieveWithReasoning(queryEmbedding, { + domain: 'testing', + optimizeMemory: true, // Enable automatic optimization +}); + +// MemoryOptimizer consolidates similar patterns and prunes low-quality +console.log('Optimizations:', result.optimizations); +// { consolidated: 15, pruned: 3, improved_quality: 0.12 } +``` + +### 4. ExperienceCurator + +Filter by quality and relevance: + +```typescript +const result = await rb.retrieveWithReasoning(queryEmbedding, { + domain: 'debugging', + k: 20, + minConfidence: 0.8, // Only high-confidence experiences +}); + +// ExperienceCurator returns only quality experiences +result.memories.forEach(mem => { + console.log(`Confidence: ${mem.confidence}`); + console.log(`Success Rate: ${mem.success_count / mem.usage_count}`); +}); +``` + +--- + +## Legacy API Compatibility + +AgentDB maintains 100% backward compatibility with legacy ReasoningBank: + +```typescript +import { + retrieveMemories, + judgeTrajectory, + distillMemories +} from 'agentic-flow/reasoningbank'; + +// Legacy API works unchanged (uses AgentDB backend automatically) +const memories = await retrieveMemories(query, { + domain: 'code-generation', + agent: 'coder' +}); + +const verdict = await judgeTrajectory(trajectory, query); + +const newMemories = await distillMemories( + trajectory, + verdict, + query, + { domain: 'code-generation' 
} +); +``` + +--- + +## Performance Characteristics + +- **Pattern Search**: 150x faster (100µs vs 15ms) +- **Memory Retrieval**: <1ms (with cache) +- **Batch Insert**: 500x faster (2ms vs 1s for 100 patterns) +- **Trajectory Judgment**: <5ms (including retrieval + analysis) +- **Memory Distillation**: <50ms (consolidate 100 patterns) + +--- + +## Advanced Patterns + +### Hierarchical Memory + +Organize memories by abstraction level: + +```typescript +// Low-level: Specific implementation +await rb.insertPattern({ + type: 'concrete', + domain: 'debugging/null-pointer', + pattern_data: JSON.stringify({ + embedding, + pattern: { bug: 'NPE in UserService.getUser()', fix: 'Add null check' } + }), + confidence: 0.9, + // ... +}); + +// Mid-level: Pattern across similar cases +await rb.insertPattern({ + type: 'pattern', + domain: 'debugging', + pattern_data: JSON.stringify({ + embedding, + pattern: { category: 'null-pointer', approach: 'defensive-checks' } + }), + confidence: 0.85, + // ... +}); + +// High-level: General principle +await rb.insertPattern({ + type: 'principle', + domain: 'software-engineering', + pattern_data: JSON.stringify({ + embedding, + pattern: { principle: 'fail-fast with clear errors' } + }), + confidence: 0.95, + // ... 
+}); +``` + +### Multi-Domain Learning + +Transfer learning across domains: + +```typescript +// Learn from backend optimization +const backendExperience = await rb.retrieveWithReasoning(embedding, { + domain: 'backend-optimization', + k: 10, +}); + +// Apply to frontend optimization +const transferredKnowledge = backendExperience.memories.map(mem => ({ + ...mem, + domain: 'frontend-optimization', + adapted: true, +})); +``` + +--- + +## CLI Operations + +### Database Management + +```bash +# Export trajectories and patterns +npx agentdb@latest export ./.agentdb/reasoningbank.db ./backup.json + +# Import experiences +npx agentdb@latest import ./experiences.json + +# Get statistics +npx agentdb@latest stats ./.agentdb/reasoningbank.db +# Shows: total patterns, domains, confidence distribution +``` + +### Migration + +```bash +# Migrate from legacy ReasoningBank +npx agentdb@latest migrate --source .swarm/memory.db --target .agentdb/reasoningbank.db + +# Validate migration +npx agentdb@latest stats .agentdb/reasoningbank.db +``` + +--- + +## Troubleshooting + +### Issue: Migration fails +```bash +# Check source database exists +ls -la .swarm/memory.db + +# Run with verbose logging +DEBUG=agentdb:* npx agentdb@latest migrate --source .swarm/memory.db +``` + +### Issue: Low confidence scores +```typescript +// Enable context synthesis for better quality +const result = await rb.retrieveWithReasoning(embedding, { + synthesizeContext: true, + useMMR: true, + k: 10, +}); +``` + +### Issue: Memory growing too large +```typescript +// Enable automatic optimization +const result = await rb.retrieveWithReasoning(embedding, { + optimizeMemory: true, // Consolidates similar patterns +}); + +// Or manually optimize +await rb.optimize(); +``` + +--- + +## Learn More + +- **AgentDB Integration**: node_modules/agentic-flow/docs/AGENTDB_INTEGRATION.md +- **GitHub**: https://github.com/ruvnet/agentic-flow/tree/main/packages/agentdb +- **MCP Integration**: `npx agentdb@latest mcp` +- 
**Website**: https://agentdb.ruv.io + +--- + +**Category**: Machine Learning / Reinforcement Learning +**Difficulty**: Intermediate +**Estimated Time**: 20-30 minutes diff --git a/.agents/skills/reasoningbank-intelligence/SKILL.md b/.agents/skills/reasoningbank-intelligence/SKILL.md new file mode 100644 index 0000000..bf3e845 --- /dev/null +++ b/.agents/skills/reasoningbank-intelligence/SKILL.md @@ -0,0 +1,201 @@ +--- +name: "ReasoningBank Intelligence" +description: "Implement adaptive learning with ReasoningBank for pattern recognition, strategy optimization, and continuous improvement. Use when building self-learning agents, optimizing workflows, or implementing meta-cognitive systems." +--- + +# ReasoningBank Intelligence + +## What This Skill Does + +Implements ReasoningBank's adaptive learning system for AI agents to learn from experience, recognize patterns, and optimize strategies over time. Enables meta-cognitive capabilities and continuous improvement. + +## Prerequisites + +- agentic-flow v3.0.0-alpha.1+ +- AgentDB v3.0.0-alpha.10+ (for persistence) +- Node.js 18+ + +## Quick Start + +```typescript +import { ReasoningBank } from 'agentic-flow/reasoningbank'; + +// Initialize ReasoningBank +const rb = new ReasoningBank({ + persist: true, + learningRate: 0.1, + adapter: 'agentdb' // Use AgentDB for storage +}); + +// Record task outcome +await rb.recordExperience({ + task: 'code_review', + approach: 'static_analysis_first', + outcome: { + success: true, + metrics: { + bugs_found: 5, + time_taken: 120, + false_positives: 1 + } + }, + context: { + language: 'typescript', + complexity: 'medium' + } +}); + +// Get optimal strategy +const strategy = await rb.recommendStrategy('code_review', { + language: 'typescript', + complexity: 'high' +}); +``` + +## Core Features + +### 1. 
Pattern Recognition +```typescript +// Learn patterns from data +await rb.learnPattern({ + pattern: 'api_errors_increase_after_deploy', + triggers: ['deployment', 'traffic_spike'], + actions: ['rollback', 'scale_up'], + confidence: 0.85 +}); + +// Match patterns +const matches = await rb.matchPatterns(currentSituation); +``` + +### 2. Strategy Optimization +```typescript +// Compare strategies +const comparison = await rb.compareStrategies('bug_fixing', [ + 'tdd_approach', + 'debug_first', + 'reproduce_then_fix' +]); + +// Get best strategy +const best = comparison.strategies[0]; +console.log(`Best: ${best.name} (score: ${best.score})`); +``` + +### 3. Continuous Learning +```typescript +// Enable auto-learning from all tasks +await rb.enableAutoLearning({ + threshold: 0.7, // Only learn from high-confidence outcomes + updateFrequency: 100 // Update models every 100 experiences +}); +``` + +## Advanced Usage + +### Meta-Learning +```typescript +// Learn about learning +await rb.metaLearn({ + observation: 'parallel_execution_faster_for_independent_tasks', + confidence: 0.95, + applicability: { + task_types: ['batch_processing', 'data_transformation'], + conditions: ['tasks_independent', 'io_bound'] + } +}); +``` + +### Transfer Learning +```typescript +// Apply knowledge from one domain to another +await rb.transferKnowledge({ + from: 'code_review_javascript', + to: 'code_review_typescript', + similarity: 0.8 +}); +``` + +### Adaptive Agents +```typescript +// Create self-improving agent +class AdaptiveAgent { + async execute(task: Task) { + // Get optimal strategy + const strategy = await rb.recommendStrategy(task.type, task.context); + + // Execute with strategy + const result = await this.executeWithStrategy(task, strategy); + + // Learn from outcome + await rb.recordExperience({ + task: task.type, + approach: strategy.name, + outcome: result, + context: task.context + }); + + return result; + } +} +``` + +## Integration with AgentDB + +```typescript +// Persist 
ReasoningBank data +await rb.configure({ + storage: { + type: 'agentdb', + options: { + database: './reasoning-bank.db', + enableVectorSearch: true + } + } +}); + +// Query learned patterns +const patterns = await rb.query({ + category: 'optimization', + minConfidence: 0.8, + timeRange: { last: '30d' } +}); +``` + +## Performance Metrics + +```typescript +// Track learning effectiveness +const metrics = await rb.getMetrics(); +console.log(` + Total Experiences: ${metrics.totalExperiences} + Patterns Learned: ${metrics.patternsLearned} + Strategy Success Rate: ${metrics.strategySuccessRate} + Improvement Over Time: ${metrics.improvement} +`); +``` + +## Best Practices + +1. **Record consistently**: Log all task outcomes, not just successes +2. **Provide context**: Rich context improves pattern matching +3. **Set thresholds**: Filter low-confidence learnings +4. **Review periodically**: Audit learned patterns for quality +5. **Use vector search**: Enable semantic pattern matching + +## Troubleshooting + +### Issue: Poor recommendations +**Solution**: Ensure sufficient training data (100+ experiences per task type) + +### Issue: Slow pattern matching +**Solution**: Enable vector indexing in AgentDB + +### Issue: Memory growing large +**Solution**: Set TTL for old experiences or enable pruning + +## Learn More + +- ReasoningBank Guide: agentic-flow/src/reasoningbank/README.md +- AgentDB Integration: packages/agentdb/docs/reasoningbank.md +- Pattern Learning: docs/reasoning/patterns.md diff --git a/.agents/skills/skill-builder/SKILL.md b/.agents/skills/skill-builder/SKILL.md new file mode 100644 index 0000000..8849c62 --- /dev/null +++ b/.agents/skills/skill-builder/SKILL.md @@ -0,0 +1,910 @@ +--- +name: "Skill Builder" +description: "Create new Codex Skills with proper YAML frontmatter, progressive disclosure structure, and complete directory organization. 
Use when you need to build custom skills for specific workflows, generate skill templates, or understand the Codex Skills specification." +--- + +# Skill Builder + +## What This Skill Does + +Creates production-ready Codex Skills with proper YAML frontmatter, progressive disclosure architecture, and complete file/folder structure. This skill guides you through building skills that Codex can autonomously discover and use across all surfaces (Codex.ai, Codex, SDK, API). + +## Prerequisites + +- Codex 2.0+ or Codex.ai with Skills support +- Basic understanding of Markdown and YAML +- Text editor or IDE + +## Quick Start + +### Creating Your First Skill + +```bash +# 1. Create skill directory (MUST be at top level, NOT in subdirectories!) +mkdir -p ~/.Codex/skills/my-first-skill + +# 2. Create SKILL.md with proper format +cat > ~/.Codex/skills/my-first-skill/SKILL.md << 'EOF' +--- +name: "My First Skill" +description: "Brief description of what this skill does and when Codex should use it. Maximum 1024 characters." +--- + +# My First Skill + +## What This Skill Does +[Your instructions here] + +## Quick Start +[Basic usage] +EOF + +# 3. Verify skill is detected +# Restart Codex or refresh Codex.ai +``` + +--- + +## Complete Specification + +### 📋 YAML Frontmatter (REQUIRED) + +Every SKILL.md **must** start with YAML frontmatter containing exactly two required fields: + +```yaml +--- +name: "Skill Name" # REQUIRED: Max 64 chars +description: "What this skill does # REQUIRED: Max 1024 chars +and when Codex should use it." 
# Include BOTH what & when +--- +``` + +#### Field Requirements + +**`name`** (REQUIRED): +- **Type**: String +- **Max Length**: 64 characters +- **Format**: Human-friendly display name +- **Usage**: Shown in skill lists, UI, and loaded into Codex's system prompt +- **Best Practice**: Use Title Case, be concise and descriptive +- **Examples**: + - ✅ "API Documentation Generator" + - ✅ "React Component Builder" + - ✅ "Database Schema Designer" + - ❌ "skill-1" (not descriptive) + - ❌ "This is a very long skill name that exceeds sixty-four characters" (too long) + +**`description`** (REQUIRED): +- **Type**: String +- **Max Length**: 1024 characters +- **Format**: Plain text or minimal markdown +- **Content**: MUST include: + 1. **What** the skill does (functionality) + 2. **When** Codex should invoke it (trigger conditions) +- **Usage**: Loaded into Codex's system prompt for autonomous matching +- **Best Practice**: Front-load key trigger words, be specific about use cases +- **Examples**: + - ✅ "Generate OpenAPI 3.0 documentation from Express.js routes. Use when creating API docs, documenting endpoints, or building API specifications." + - ✅ "Create React functional components with TypeScript, hooks, and tests. Use when scaffolding new components or converting class components." + - ❌ "A comprehensive guide to API documentation" (no "when" clause) + - ❌ "Documentation tool" (too vague) + +#### YAML Formatting Rules + +```yaml +--- +# ✅ CORRECT: Simple string +name: "API Builder" +description: "Creates REST APIs with Express and TypeScript." + +# ✅ CORRECT: Multi-line description +name: "Full-Stack Generator" +description: "Generates full-stack applications with React frontend and Node.js backend. Use when starting new projects or scaffolding applications." + +# ✅ CORRECT: Special characters quoted +name: "JSON:API Builder" +description: "Creates JSON:API compliant endpoints: pagination, filtering, relationships." 
+ +# ❌ WRONG: Missing quotes with special chars +name: API:Builder # YAML parse error! + +# ❌ WRONG: Extra fields (ignored but discouraged) +name: "My Skill" +description: "My description" +version: "1.0.0" # NOT part of spec +author: "Me" # NOT part of spec +tags: ["dev", "api"] # NOT part of spec +--- +``` + +**Critical**: Only `name` and `description` are used by Codex. Additional fields are ignored. + +--- + +### 📂 Directory Structure + +#### Minimal Skill (Required) +``` +~/.Codex/skills/ # Personal skills location +└── my-skill/ # Skill directory (MUST be at top level!) + └── SKILL.md # REQUIRED: Main skill file +``` + +**IMPORTANT**: Skills MUST be directly under `~/.Codex/skills/[skill-name]/`. +Codex does NOT support nested subdirectories or namespaces! + +#### Full-Featured Skill (Recommended) +``` +~/.Codex/skills/ +└── my-skill/ # Top-level skill directory + ├── SKILL.md # REQUIRED: Main skill file + ├── README.md # Optional: Human-readable docs + ├── scripts/ # Optional: Executable scripts + │ ├── setup.sh + │ ├── validate.js + │ └── deploy.py + ├── resources/ # Optional: Supporting files + │ ├── templates/ + │ │ ├── api-template.js + │ │ └── component.tsx + │ ├── examples/ + │ │ └── sample-output.json + │ └── schemas/ + │ └── config-schema.json + └── docs/ # Optional: Additional documentation + ├── ADVANCED.md + ├── TROUBLESHOOTING.md + └── API_REFERENCE.md +``` + +#### Skills Locations + +**Personal Skills** (available across all projects): +``` +~/.Codex/skills/ +└── [your-skills]/ +``` +- **Path**: `~/.Codex/skills/` or `$HOME/.Codex/skills/` +- **Scope**: Available in all projects for this user +- **Version Control**: NOT committed to git (outside repo) +- **Use Case**: Personal productivity tools, custom workflows + +**Project Skills** (team-shared, version controlled): +``` +/.Codex/skills/ +└── [team-skills]/ +``` +- **Path**: `.Codex/skills/` in project root +- **Scope**: Available only in this project +- **Version Control**: SHOULD be 
committed to git +- **Use Case**: Team workflows, project-specific tools, shared knowledge + +--- + +### 🎯 Progressive Disclosure Architecture + +Codex uses a **3-level progressive disclosure system** to scale to 100+ skills without context penalty: + +#### Level 1: Metadata (Name + Description) +**Loaded**: At Codex startup, always +**Size**: ~200 chars per skill +**Purpose**: Enable autonomous skill matching +**Context**: Loaded into system prompt for ALL skills + +```yaml +--- +name: "API Builder" # 11 chars +description: "Creates REST APIs..." # ~50 chars +--- +# Total: ~61 chars per skill +# 100 skills = ~6KB context (minimal!) +``` + +#### Level 2: SKILL.md Body +**Loaded**: When skill is triggered/matched +**Size**: ~1-10KB typically +**Purpose**: Main instructions and procedures +**Context**: Only loaded for ACTIVE skills + +```markdown +# API Builder + +## What This Skill Does +[Main instructions - loaded only when skill is active] + +## Quick Start +[Basic procedures] + +## Step-by-Step Guide +[Detailed instructions] +``` + +#### Level 3+: Referenced Files +**Loaded**: On-demand as Codex navigates +**Size**: Variable (KB to MB) +**Purpose**: Deep reference, examples, schemas +**Context**: Loaded only when Codex accesses specific files + +```markdown +# In SKILL.md +See [Advanced Configuration](docs/ADVANCED.md) for complex scenarios. +See [API Reference](docs/API_REFERENCE.md) for complete documentation. +Use template: `resources/templates/api-template.js` + +# Codex will load these files ONLY if needed +``` + +**Benefit**: Install 100+ skills with ~6KB context. Only active skill content (1-10KB) enters context. + +--- + +### 📝 SKILL.md Content Structure + +#### Recommended 4-Level Structure + +```markdown +--- +name: "Your Skill Name" +description: "What it does and when to use it" +--- + +# Your Skill Name + +## Level 1: Overview (Always Read First) +Brief 2-3 sentence description of the skill. 
+ +## Prerequisites +- Requirement 1 +- Requirement 2 + +## What This Skill Does +1. Primary function +2. Secondary function +3. Key benefit + +--- + +## Level 2: Quick Start (For Fast Onboarding) + +### Basic Usage +```bash +# Simplest use case +command --option value +``` + +### Common Scenarios +1. **Scenario 1**: How to... +2. **Scenario 2**: How to... + +--- + +## Level 3: Detailed Instructions (For Deep Work) + +### Step-by-Step Guide + +#### Step 1: Initial Setup +```bash +# Commands +``` +Expected output: +``` +Success message +``` + +#### Step 2: Configuration +- Configuration option 1 +- Configuration option 2 + +#### Step 3: Execution +- Run the main command +- Verify results + +### Advanced Options + +#### Option 1: Custom Configuration +```bash +# Advanced usage +``` + +#### Option 2: Integration +```bash +# Integration steps +``` + +--- + +## Level 4: Reference (Rarely Needed) + +### Troubleshooting + +#### Issue: Common Problem +**Symptoms**: What you see +**Cause**: Why it happens +**Solution**: How to fix +```bash +# Fix command +``` + +#### Issue: Another Problem +**Solution**: Steps to resolve + +### Complete API Reference +See [API_REFERENCE.md](docs/API_REFERENCE.md) + +### Examples +See [examples/](resources/examples/) + +### Related Skills +- [Related Skill 1](#) +- [Related Skill 2](#) + +### Resources +- [External Link 1](https://example.com) +- [Documentation](https://docs.example.com) +``` + +--- + +### 🎨 Content Best Practices + +#### Writing Effective Descriptions + +**Front-Load Keywords**: +```yaml +# ✅ GOOD: Keywords first +description: "Generate TypeScript interfaces from JSON schema. Use when converting schemas, creating types, or building API clients." + +# ❌ BAD: Keywords buried +description: "This skill helps developers who need to work with JSON schemas by providing a way to generate TypeScript interfaces." 
+``` + +**Include Trigger Conditions**: +```yaml +# ✅ GOOD: Clear "when" clause +description: "Debug React performance issues using Chrome DevTools. Use when components re-render unnecessarily, investigating slow updates, or optimizing bundle size." + +# ❌ BAD: No trigger conditions +description: "Helps with React performance debugging." +``` + +**Be Specific**: +```yaml +# ✅ GOOD: Specific technologies +description: "Create Express.js REST endpoints with Joi validation, Swagger docs, and Jest tests. Use when building new APIs or adding endpoints." + +# ❌ BAD: Too generic +description: "Build API endpoints with proper validation and testing." +``` + +#### Progressive Disclosure Writing + +**Keep Level 1 Brief** (Overview): +```markdown +## What This Skill Does +Creates production-ready React components with TypeScript, hooks, and tests in 3 steps. +``` + +**Level 2 for Common Paths** (Quick Start): +```markdown +## Quick Start +```bash +# Most common use case (80% of users) +generate-component MyComponent +``` +``` + +**Level 3 for Details** (Step-by-Step): +```markdown +## Step-by-Step Guide + +### Creating a Basic Component +1. Run generator +2. Choose template +3. Customize options +[Detailed explanations] +``` + +**Level 4 for Edge Cases** (Reference): +```markdown +## Advanced Configuration +For complex scenarios like HOCs, render props, or custom hooks, see [ADVANCED.md](docs/ADVANCED.md). 
+``` + +--- + +### 🛠️ Adding Scripts and Resources + +#### Scripts Directory + +**Purpose**: Executable scripts that Codex can run +**Location**: `scripts/` in skill directory +**Usage**: Referenced from SKILL.md + +Example: +```bash +# In skill directory +scripts/ +├── setup.sh # Initialization script +├── validate.js # Validation logic +├── generate.py # Code generation +└── deploy.sh # Deployment script +``` + +Reference from SKILL.md: +```markdown +## Setup +Run the setup script: +```bash +./scripts/setup.sh +``` + +## Validation +Validate your configuration: +```bash +node scripts/validate.js config.json +``` +``` + +#### Resources Directory + +**Purpose**: Templates, examples, schemas, static files +**Location**: `resources/` in skill directory +**Usage**: Referenced or copied by scripts + +Example: +```bash +resources/ +├── templates/ +│ ├── component.tsx.template +│ ├── test.spec.ts.template +│ └── story.stories.tsx.template +├── examples/ +│ ├── basic-example/ +│ ├── advanced-example/ +│ └── integration-example/ +└── schemas/ + ├── config.schema.json + └── output.schema.json +``` + +Reference from SKILL.md: +```markdown +## Templates +Use the component template: +```bash +cp resources/templates/component.tsx.template src/components/MyComponent.tsx +``` + +## Examples +See working examples in `resources/examples/`: +- `basic-example/` - Simple component +- `advanced-example/` - With hooks and context +``` + +--- + +### 🔗 File References and Navigation + +Codex can navigate to referenced files automatically. Use these patterns: + +#### Markdown Links +```markdown +See [Advanced Configuration](docs/ADVANCED.md) for complex scenarios. +See [Troubleshooting Guide](docs/TROUBLESHOOTING.md) if you encounter errors. 
+``` + +#### Relative File Paths +```markdown +Use the template located at `resources/templates/api-template.js` +See examples in `resources/examples/basic-usage/` +``` + +#### Inline File Content +```markdown +## Example Configuration +See `resources/examples/config.json`: +```json +{ + "option": "value" +} +``` +``` + +**Best Practice**: Keep SKILL.md lean (~2-5KB). Move lengthy content to separate files and reference them. Codex will load only what's needed. + +--- + +### ✅ Validation Checklist + +Before publishing a skill, verify: + +**YAML Frontmatter**: +- [ ] Starts with `---` +- [ ] Contains `name` field (max 64 chars) +- [ ] Contains `description` field (max 1024 chars) +- [ ] Description includes "what" and "when" +- [ ] Ends with `---` +- [ ] No YAML syntax errors + +**File Structure**: +- [ ] SKILL.md exists in skill directory +- [ ] Directory is DIRECTLY in `~/.Codex/skills/[skill-name]/` or `.Codex/skills/[skill-name]/` +- [ ] Uses clear, descriptive directory name +- [ ] **NO nested subdirectories** (Codex requires top-level structure) + +**Content Quality**: +- [ ] Level 1 (Overview) is brief and clear +- [ ] Level 2 (Quick Start) shows common use case +- [ ] Level 3 (Details) provides step-by-step guide +- [ ] Level 4 (Reference) links to advanced content +- [ ] Examples are concrete and runnable +- [ ] Troubleshooting section addresses common issues + +**Progressive Disclosure**: +- [ ] Core instructions in SKILL.md (~2-5KB) +- [ ] Advanced content in separate docs/ +- [ ] Large resources in resources/ directory +- [ ] Clear navigation between levels + +**Testing**: +- [ ] Skill appears in Codex's skill list +- [ ] Description triggers on relevant queries +- [ ] Instructions are clear and actionable +- [ ] Scripts execute successfully (if included) +- [ ] Examples work as documented + +--- + +## Skill Builder Templates + +### Template 1: Basic Skill (Minimal) + +```markdown +--- +name: "My Basic Skill" +description: "One sentence what. 
One sentence when to use." +--- + +# My Basic Skill + +## What This Skill Does +[2-3 sentences describing functionality] + +## Quick Start +```bash +# Single command to get started +``` + +## Step-by-Step Guide + +### Step 1: Setup +[Instructions] + +### Step 2: Usage +[Instructions] + +### Step 3: Verify +[Instructions] + +## Troubleshooting +- **Issue**: Problem description + - **Solution**: Fix description +``` + +### Template 2: Intermediate Skill (With Scripts) + +```markdown +--- +name: "My Intermediate Skill" +description: "Detailed what with key features. When to use with specific triggers: scaffolding, generating, building." +--- + +# My Intermediate Skill + +## Prerequisites +- Requirement 1 +- Requirement 2 + +## What This Skill Does +1. Primary function +2. Secondary function +3. Integration capability + +## Quick Start +```bash +./scripts/setup.sh +./scripts/generate.sh my-project +``` + +## Configuration +Edit `config.json`: +```json +{ + "option1": "value1", + "option2": "value2" +} +``` + +## Step-by-Step Guide + +### Basic Usage +[Steps for 80% use case] + +### Advanced Usage +[Steps for complex scenarios] + +## Available Scripts +- `scripts/setup.sh` - Initial setup +- `scripts/generate.sh` - Code generation +- `scripts/validate.sh` - Validation + +## Resources +- Templates: `resources/templates/` +- Examples: `resources/examples/` + +## Troubleshooting +[Common issues and solutions] +``` + +### Template 3: Advanced Skill (Full-Featured) + +```markdown +--- +name: "My Advanced Skill" +description: "Comprehensive what with all features and integrations. Use when [trigger 1], [trigger 2], or [trigger 3]. Supports [technology stack]." +--- + +# My Advanced Skill + +## Overview +[Brief 2-3 sentence description] + +## Prerequisites +- Technology 1 (version X+) +- Technology 2 (version Y+) +- API keys or credentials + +## What This Skill Does +1. **Core Feature**: Description +2. **Integration**: Description +3. 
**Automation**: Description + +--- + +## Quick Start (60 seconds) + +### Installation +```bash +./scripts/install.sh +``` + +### First Use +```bash +./scripts/quickstart.sh +``` + +Expected output: +``` +✓ Setup complete +✓ Configuration validated +→ Ready to use +``` + +--- + +## Configuration + +### Basic Configuration +Edit `config.json`: +```json +{ + "mode": "production", + "features": ["feature1", "feature2"] +} +``` + +### Advanced Configuration +See [Configuration Guide](docs/CONFIGURATION.md) + +--- + +## Step-by-Step Guide + +### 1. Initial Setup +[Detailed steps] + +### 2. Core Workflow +[Main procedures] + +### 3. Integration +[Integration steps] + +--- + +## Advanced Features + +### Feature 1: Custom Templates +```bash +./scripts/generate.sh --template custom +``` + +### Feature 2: Batch Processing +```bash +./scripts/batch.sh --input data.json +``` + +### Feature 3: CI/CD Integration +See [CI/CD Guide](docs/CICD.md) + +--- + +## Scripts Reference + +| Script | Purpose | Usage | +|--------|---------|-------| +| `install.sh` | Install dependencies | `./scripts/install.sh` | +| `generate.sh` | Generate code | `./scripts/generate.sh [name]` | +| `validate.sh` | Validate output | `./scripts/validate.sh` | +| `deploy.sh` | Deploy to environment | `./scripts/deploy.sh [env]` | + +--- + +## Resources + +### Templates +- `resources/templates/basic.template` - Basic template +- `resources/templates/advanced.template` - Advanced template + +### Examples +- `resources/examples/basic/` - Simple example +- `resources/examples/advanced/` - Complex example +- `resources/examples/integration/` - Integration example + +### Schemas +- `resources/schemas/config.schema.json` - Configuration schema +- `resources/schemas/output.schema.json` - Output validation + +--- + +## Troubleshooting + +### Issue: Installation Failed +**Symptoms**: Error during `install.sh` +**Cause**: Missing dependencies +**Solution**: +```bash +# Install prerequisites +npm install -g 
required-package +./scripts/install.sh --force +``` + +### Issue: Validation Errors +**Symptoms**: Validation script fails +**Solution**: See [Troubleshooting Guide](docs/TROUBLESHOOTING.md) + +--- + +## API Reference +Complete API documentation: [API_REFERENCE.md](docs/API_REFERENCE.md) + +## Related Skills +- [Related Skill 1](../related-skill-1/) +- [Related Skill 2](../related-skill-2/) + +## Resources +- [Official Documentation](https://example.com/docs) +- [GitHub Repository](https://github.com/example/repo) +- [Community Forum](https://forum.example.com) + +--- + +**Created**: 2025-10-19 +**Category**: Advanced +**Difficulty**: Intermediate +**Estimated Time**: 15-30 minutes +``` + +--- + +## Examples from the Wild + +### Example 1: Simple Documentation Skill + +```markdown +--- +name: "README Generator" +description: "Generate comprehensive README.md files for GitHub repositories. Use when starting new projects, documenting code, or improving existing READMEs." +--- + +# README Generator + +## What This Skill Does +Creates well-structured README.md files with badges, installation, usage, and contribution sections. + +## Quick Start +```bash +# Answer a few questions +./scripts/generate-readme.sh + +# README.md created with: +# - Project title and description +# - Installation instructions +# - Usage examples +# - Contribution guidelines +``` + +## Customization +Edit sections in `resources/templates/sections/` before generating. +``` + +### Example 2: Code Generation Skill + +```markdown +--- +name: "React Component Generator" +description: "Generate React functional components with TypeScript, hooks, tests, and Storybook stories. Use when creating new components, scaffolding UI, or following component architecture patterns." 
+--- + +# React Component Generator + +## Prerequisites +- Node.js 18+ +- React 18+ +- TypeScript 5+ + +## Quick Start +```bash +./scripts/generate-component.sh MyComponent + +# Creates: +# - src/components/MyComponent/MyComponent.tsx +# - src/components/MyComponent/MyComponent.test.tsx +# - src/components/MyComponent/MyComponent.stories.tsx +# - src/components/MyComponent/index.ts +``` + +## Step-by-Step Guide + +### 1. Run Generator +```bash +./scripts/generate-component.sh ComponentName +``` + +### 2. Choose Template +- Basic: Simple functional component +- With State: useState hooks +- With Context: useContext integration +- With API: Data fetching component + +### 3. Customize +Edit generated files in `src/components/ComponentName/` + +## Templates +See `resources/templates/` for available component templates. +``` + +--- + +## Learn More + +### Official Resources +- [Anthropic Agent Skills Documentation](https://docs.Codex.com/en/docs/agents-and-tools/agent-skills) +- [GitHub Skills Repository](https://github.com/anthropics/skills) +- [Codex Documentation](https://docs.Codex.com/en/docs/Codex) + +### Community +- [Skills Marketplace](https://github.com/anthropics/skills) - Browse community skills +- [Anthropic Discord](https://discord.gg/anthropic) - Get help from community + +### Advanced Topics +- Multi-file skills with complex navigation +- Skills that spawn other skills +- Integration with MCP tools +- Dynamic skill generation + +--- + +**Created**: 2025-10-19 +**Version**: 1.0.0 +**Maintained By**: agentic-flow team +**License**: MIT diff --git a/.agents/skills/soft-delete-relogin-consistency/SKILL.md b/.agents/skills/soft-delete-relogin-consistency/SKILL.md new file mode 100644 index 0000000..5ed1e94 --- /dev/null +++ b/.agents/skills/soft-delete-relogin-consistency/SKILL.md @@ -0,0 +1,144 @@ +--- +name: soft-delete-relogin-consistency +description: | + Fix for missing auth/identity records after account deletion + device re-login. 
+ Use when: (1) User deletes account but device records are intentionally kept + (e.g., to prevent trial abuse), (2) Re-login via device succeeds but user + appears to have wrong identity type, (3) Frontend shows incorrect UI because + auth_methods or similar identity records are empty/wrong after re-login, + (4) Soft-deleted records cause stale cache entries that misrepresent user state. + Covers GORM soft-delete, device-based auth, cache invalidation after re-creation. +author: Codex +version: 1.0.0 +date: 2026-03-11 +--- + +# Soft-Delete + Re-Login Auth Consistency + +## Problem + +When a system uses soft-delete for auth/identity records during account deletion but +intentionally keeps primary records (like device records) for abuse prevention, re-login +flows may succeed at the "find existing record" step but fail to re-create the +soft-deleted identity records. This causes the user to exist in an inconsistent state +where they're authenticated but missing critical identity metadata. 
+ +## Context / Trigger Conditions + +- Account deletion (注销) soft-deletes `auth_methods` (or equivalent identity records) +- Device/hardware records are intentionally kept to prevent trial reward abuse +- Device-based re-login finds existing device record -> reuses old user_id +- But the "device found" code path skips identity record creation (only the + "device not found" registration path creates them) +- Result: User is logged in but `auth_methods` is empty or missing the expected type +- Frontend UI breaks because it relies on `auth_methods[0].auth_type` to determine + login mode and show/hide UI elements + +### Symptoms + +- Buttons or UI elements that should be hidden for device-only users appear after + account deletion + re-login +- API returns user info with empty or unexpected `auth_methods` array +- `isDeviceLogin()` or similar identity checks return wrong results +- Cache returns stale user data even after re-login + +## Solution + +### Step 1: Identify the re-login code path + +Find the "device found" branch in the login logic. This is the code path that runs +when a device record already exists (as opposed to the registration path). 
+
+### Step 2: Add identity record existence check
+
+After finding the user via device record, check if the expected identity record exists:
+
+```go
+// After finding user via existing device record
+hasDeviceAuth := false
+for _, am := range userInfo.AuthMethods {
+    if am.AuthType == "device" && am.AuthIdentifier == req.Identifier {
+        hasDeviceAuth = true
+        break
+    }
+}
+if !hasDeviceAuth {
+    // Re-create the soft-deleted auth record
+    authMethod := &user.AuthMethods{
+        UserId:         userInfo.Id,
+        AuthType:       "device",
+        AuthIdentifier: req.Identifier,
+        Verified:       true,
+    }
+    if createErr := db.Create(authMethod).Error; createErr != nil {
+        log.Error("re-create auth method failed", createErr)
+    } else {
+        // CRITICAL: Clear user cache so subsequent reads return updated data
+        _ = userModel.ClearUserCache(ctx, userInfo)
+    }
+}
+```
+
+### Step 3: Ensure cache invalidation
+
+After re-creating the identity record, clear the user cache. This is critical because
+cached user data (with `Preload("AuthMethods")`) will still show the old empty state
+until the cache is invalidated.
+
+### Step 4: Verify GORM soft-delete behavior
+
+GORM's soft-delete (`deleted_at IS NULL` filter) means:
+- `Preload("AuthMethods")` will NOT return soft-deleted records
+- `db.Create()` will create a NEW record (not undelete the old one)
+- The old soft-deleted record remains in the database (harmless)
+
+## Verification
+
+1. Delete account (注销)
+2. Re-login via device
+3. Call user info API - verify `auth_methods` contains the device type
+4. Check frontend UI - verify device-specific UI state is correct
+
+## Example
+
+**Before fix:**
+```
+1. User has auth_methods: [device_A, email_A]
+2. User deletes account -> auth_methods all soft-deleted
+3. Device record kept (abuse prevention)
+4. User re-logins via same device
+5. FindOneDeviceByIdentifier finds device -> reuses user_id
+6. FindOne returns user with AuthMethods=[] (soft-deleted, filtered out)
+7. 
Frontend: isDeviceLogin() = false (no auth_methods) -> shows wrong buttons +``` + +**After fix:** +``` +1-4. Same as above +5. FindOneDeviceByIdentifier finds device -> reuses user_id +6. FindOne returns user with AuthMethods=[] +7. NEW: Detects missing device auth_method, re-creates it, clears cache +8. Frontend: isDeviceLogin() = true -> correct UI +``` + +## Notes + +- This pattern applies broadly to any system where: + - Account deletion removes identity records but keeps usage records + - Re-login can succeed via the usage records + - UI/business logic depends on the identity records existing +- The "don't delete device records" design is intentional for preventing abuse + (e.g., users repeatedly deleting and re-creating accounts to get trial rewards) +- Cache invalidation is the most commonly missed step - without it, the fix appears + to not work because cached data is served until TTL expires +- Consider whether `Unscoped()` (GORM) should be used to also query soft-deleted + records, or whether re-creation is the better approach (usually re-creation is + cleaner as it creates a fresh record with correct timestamps) + +## Related Patterns + +- **Cache key dependency chains**: When `ClearUserCache` depends on `AuthMethods` + to generate email cache keys, capture auth_methods BEFORE deletion, then explicitly + clear derived cache keys after the transaction +- **Family ownership transfer**: When an owner exits a shared resource group, transfer + ownership to a remaining member instead of dissolving the group diff --git a/.agents/skills/stream-chain/SKILL.md b/.agents/skills/stream-chain/SKILL.md new file mode 100644 index 0000000..14333dd --- /dev/null +++ b/.agents/skills/stream-chain/SKILL.md @@ -0,0 +1,563 @@ +--- +name: stream-chain +description: Stream-JSON chaining for multi-agent pipelines, data transformation, and sequential workflows +version: 1.0.0 +category: workflow +tags: [streaming, pipeline, chaining, multi-agent, workflow] +--- + +# Stream-Chain 
Skill + +Execute sophisticated multi-step workflows where each agent's output flows into the next, enabling complex data transformations and sequential processing pipelines. + +## Overview + +Stream-Chain provides two powerful modes for orchestrating multi-agent workflows: + +1. **Custom Chains** (`run`): Execute custom prompt sequences with full control +2. **Predefined Pipelines** (`pipeline`): Use battle-tested workflows for common tasks + +Each step in a chain receives the complete output from the previous step, enabling sophisticated multi-agent coordination through streaming data flow. + +--- + +## Quick Start + +### Run a Custom Chain + +```bash +Codex-flow stream-chain run \ + "Analyze codebase structure" \ + "Identify improvement areas" \ + "Generate action plan" +``` + +### Execute a Pipeline + +```bash +Codex-flow stream-chain pipeline analysis +``` + +--- + +## Custom Chains (`run`) + +Execute custom stream chains with your own prompts for maximum flexibility. + +### Syntax + +```bash +Codex-flow stream-chain run [...] 
[options]
+```
+
+**Requirements:**
+- Minimum 2 prompts required
+- Each prompt becomes a step in the chain
+- Output flows sequentially through all steps
+
+### Options
+
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--verbose` | Show detailed execution information | `false` |
+| `--timeout <seconds>` | Timeout per step | `30` |
+| `--debug` | Enable debug mode with full logging | `false` |
+
+### How Context Flows
+
+Each step receives the previous output as context:
+
+```
+Step 1: "Write a sorting function"
+Output: [function implementation]
+
+Step 2 receives:
+  "Previous step output:
+  [function implementation]
+
+  Next task: Add comprehensive tests"
+
+Step 3 receives:
+  "Previous steps output:
+  [function + tests]
+
+  Next task: Optimize performance"
+```
+
+### Examples
+
+#### Basic Development Chain
+
+```bash
+Codex-flow stream-chain run \
+  "Write a user authentication function" \
+  "Add input validation and error handling" \
+  "Create unit tests with edge cases"
+```
+
+#### Security Audit Workflow
+
+```bash
+Codex-flow stream-chain run \
+  "Analyze authentication system for vulnerabilities" \
+  "Identify and categorize security issues by severity" \
+  "Propose fixes with implementation priority" \
+  "Generate security test cases" \
+  --timeout 45 \
+  --verbose
+```
+
+#### Code Refactoring Chain
+
+```bash
+Codex-flow stream-chain run \
+  "Identify code smells in src/ directory" \
+  "Create refactoring plan with specific changes" \
+  "Apply refactoring to top 3 priority items" \
+  "Verify refactored code maintains behavior" \
+  --debug
+```
+
+#### Data Processing Pipeline
+
+```bash
+Codex-flow stream-chain run \
+  "Extract data from API responses" \
+  "Transform data into normalized format" \
+  "Validate data against schema" \
+  "Generate data quality report"
+```
+
+---
+
+## Predefined Pipelines (`pipeline`)
+
+Execute battle-tested workflows optimized for common development tasks. 
+
+### Syntax
+
+```bash
+Codex-flow stream-chain pipeline <name> [options]
+```
+
+### Available Pipelines
+
+#### 1. Analysis Pipeline
+
+Comprehensive codebase analysis and improvement identification.
+
+```bash
+Codex-flow stream-chain pipeline analysis
+```
+
+**Workflow Steps:**
+1. **Structure Analysis**: Map directory structure and identify components
+2. **Issue Detection**: Find potential improvements and problems
+3. **Recommendations**: Generate actionable improvement report
+
+**Use Cases:**
+- New codebase onboarding
+- Technical debt assessment
+- Architecture review
+- Code quality audits
+
+#### 2. Refactor Pipeline
+
+Systematic code refactoring with prioritization.
+
+```bash
+Codex-flow stream-chain pipeline refactor
+```
+
+**Workflow Steps:**
+1. **Candidate Identification**: Find code needing refactoring
+2. **Prioritization**: Create ranked refactoring plan
+3. **Implementation**: Provide refactored code for top priorities
+
+**Use Cases:**
+- Technical debt reduction
+- Code quality improvement
+- Legacy code modernization
+- Design pattern implementation
+
+#### 3. Test Pipeline
+
+Comprehensive test generation with coverage analysis.
+
+```bash
+Codex-flow stream-chain pipeline test
+```
+
+**Workflow Steps:**
+1. **Coverage Analysis**: Identify areas lacking tests
+2. **Test Design**: Create test cases for critical functions
+3. **Implementation**: Generate unit tests with assertions
+
+**Use Cases:**
+- Increasing test coverage
+- TDD workflow support
+- Regression test creation
+- Quality assurance
+
+#### 4. Optimize Pipeline
+
+Performance optimization with profiling and implementation.
+
+```bash
+Codex-flow stream-chain pipeline optimize
+```
+
+**Workflow Steps:**
+1. **Profiling**: Identify performance bottlenecks
+2. **Strategy**: Analyze and suggest optimization approaches
+3. 
**Implementation**: Provide optimized code
+
+**Use Cases:**
+- Performance improvement
+- Resource optimization
+- Scalability enhancement
+- Latency reduction
+
+### Pipeline Options
+
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--verbose` | Show detailed execution | `false` |
+| `--timeout <seconds>` | Timeout per step | `30` |
+| `--debug` | Enable debug mode | `false` |
+
+### Pipeline Examples
+
+#### Quick Analysis
+
+```bash
+Codex-flow stream-chain pipeline analysis
+```
+
+#### Extended Refactoring
+
+```bash
+Codex-flow stream-chain pipeline refactor --timeout 60 --verbose
+```
+
+#### Debug Test Generation
+
+```bash
+Codex-flow stream-chain pipeline test --debug
+```
+
+#### Comprehensive Optimization
+
+```bash
+Codex-flow stream-chain pipeline optimize --timeout 90 --verbose
+```
+
+### Pipeline Output
+
+Each pipeline execution provides:
+
+- **Progress**: Step-by-step execution status
+- **Results**: Success/failure per step
+- **Timing**: Total and per-step execution time
+- **Summary**: Consolidated results and recommendations
+
+---
+
+## Custom Pipeline Definitions
+
+Define reusable pipelines in `.Codex-flow/config.json`:
+
+### Configuration Format
+
+```json
+{
+  "streamChain": {
+    "pipelines": {
+      "security": {
+        "name": "Security Audit Pipeline",
+        "description": "Comprehensive security analysis",
+        "prompts": [
+          "Scan codebase for security vulnerabilities",
+          "Categorize issues by severity (critical/high/medium/low)",
+          "Generate fixes with priority and implementation steps",
+          "Create security test suite"
+        ],
+        "timeout": 45
+      },
+      "documentation": {
+        "name": "Documentation Generation Pipeline",
+        "prompts": [
+          "Analyze code structure and identify undocumented areas",
+          "Generate API documentation with examples",
+          "Create usage guides and tutorials",
+          "Build architecture diagrams and flow charts"
+        ]
+      }
+    }
+  }
+}
+```
+
+### Execute Custom Pipeline
+
+```bash
+Codex-flow stream-chain pipeline security 
+Codex-flow stream-chain pipeline documentation +``` + +--- + +## Advanced Use Cases + +### Multi-Agent Coordination + +Chain different agent types for complex workflows: + +```bash +Codex-flow stream-chain run \ + "Research best practices for API design" \ + "Design REST API with discovered patterns" \ + "Implement API endpoints with validation" \ + "Generate OpenAPI specification" \ + "Create integration tests" \ + "Write deployment documentation" +``` + +### Data Transformation Pipeline + +Process and transform data through multiple stages: + +```bash +Codex-flow stream-chain run \ + "Extract user data from CSV files" \ + "Normalize and validate data format" \ + "Enrich data with external API calls" \ + "Generate analytics report" \ + "Create visualization code" +``` + +### Code Migration Workflow + +Systematic code migration with validation: + +```bash +Codex-flow stream-chain run \ + "Analyze legacy codebase dependencies" \ + "Create migration plan with risk assessment" \ + "Generate modernized code for high-priority modules" \ + "Create migration tests" \ + "Document migration steps and rollback procedures" +``` + +### Quality Assurance Chain + +Comprehensive code quality workflow: + +```bash +Codex-flow stream-chain pipeline analysis +Codex-flow stream-chain pipeline refactor +Codex-flow stream-chain pipeline test +Codex-flow stream-chain pipeline optimize +``` + +--- + +## Best Practices + +### 1. Clear and Specific Prompts + +**Good:** +```bash +"Analyze authentication.js for SQL injection vulnerabilities" +``` + +**Avoid:** +```bash +"Check security" +``` + +### 2. Logical Progression + +Order prompts to build on previous outputs: +```bash +1. "Identify the problem" +2. "Analyze root causes" +3. "Design solution" +4. "Implement solution" +5. "Verify implementation" +``` + +### 3. 
Appropriate Timeouts + +- Simple tasks: 30 seconds (default) +- Analysis tasks: 45-60 seconds +- Implementation tasks: 60-90 seconds +- Complex workflows: 90-120 seconds + +### 4. Verification Steps + +Include validation in your chains: +```bash +Codex-flow stream-chain run \ + "Implement feature X" \ + "Write tests for feature X" \ + "Verify tests pass and cover edge cases" +``` + +### 5. Iterative Refinement + +Use chains for iterative improvement: +```bash +Codex-flow stream-chain run \ + "Generate initial implementation" \ + "Review and identify issues" \ + "Refine based on issues found" \ + "Final quality check" +``` + +--- + +## Integration with Codex Flow + +### Combine with Swarm Coordination + +```bash +# Initialize swarm for coordination +Codex-flow swarm init --topology mesh + +# Execute stream chain with swarm agents +Codex-flow stream-chain run \ + "Agent 1: Research task" \ + "Agent 2: Implement solution" \ + "Agent 3: Test implementation" \ + "Agent 4: Review and refine" +``` + +### Memory Integration + +Stream chains automatically store context in memory for cross-session persistence: + +```bash +# Execute chain with memory +Codex-flow stream-chain run \ + "Analyze requirements" \ + "Design architecture" \ + --verbose + +# Results stored in .Codex-flow/memory/stream-chain/ +``` + +### Neural Pattern Training + +Successful chains train neural patterns for improved performance: + +```bash +# Enable neural training +Codex-flow stream-chain pipeline optimize --debug + +# Patterns learned and stored for future optimizations +``` + +--- + +## Troubleshooting + +### Chain Timeout + +If steps timeout, increase timeout value: + +```bash +Codex-flow stream-chain run "complex task" --timeout 120 +``` + +### Context Loss + +If context not flowing properly, use `--debug`: + +```bash +Codex-flow stream-chain run "step 1" "step 2" --debug +``` + +### Pipeline Not Found + +Verify pipeline name and custom definitions: + +```bash +# Check available pipelines +cat 
.Codex-flow/config.json | grep -A 10 "streamChain" +``` + +--- + +## Performance Characteristics + +- **Throughput**: 2-5 steps per minute (varies by complexity) +- **Context Size**: Up to 100K tokens per step +- **Memory Usage**: ~50MB per active chain +- **Concurrency**: Supports parallel chain execution + +--- + +## Related Skills + +- **SPARC Methodology**: Systematic development workflow +- **Swarm Coordination**: Multi-agent orchestration +- **Memory Management**: Persistent context storage +- **Neural Patterns**: Adaptive learning + +--- + +## Examples Repository + +### Complete Development Workflow + +```bash +# Full feature development chain +Codex-flow stream-chain run \ + "Analyze requirements for user profile feature" \ + "Design database schema and API endpoints" \ + "Implement backend with validation" \ + "Create frontend components" \ + "Write comprehensive tests" \ + "Generate API documentation" \ + --timeout 60 \ + --verbose +``` + +### Code Review Pipeline + +```bash +# Automated code review workflow +Codex-flow stream-chain run \ + "Analyze recent git changes" \ + "Identify code quality issues" \ + "Check for security vulnerabilities" \ + "Verify test coverage" \ + "Generate code review report with recommendations" +``` + +### Migration Assistant + +```bash +# Framework migration helper +Codex-flow stream-chain run \ + "Analyze current Vue 2 codebase" \ + "Identify Vue 3 breaking changes" \ + "Create migration checklist" \ + "Generate migration scripts" \ + "Provide updated code examples" +``` + +--- + +## Conclusion + +Stream-Chain enables sophisticated multi-step workflows by: + +- **Sequential Processing**: Each step builds on previous results +- **Context Preservation**: Full output history flows through chain +- **Flexible Orchestration**: Custom chains or predefined pipelines +- **Agent Coordination**: Natural multi-agent collaboration pattern +- **Data Transformation**: Complex processing through simple steps + +Use `run` for custom 
workflows and `pipeline` for battle-tested solutions. diff --git a/.agents/skills/swarm-advanced/SKILL.md b/.agents/skills/swarm-advanced/SKILL.md new file mode 100644 index 0000000..2853579 --- /dev/null +++ b/.agents/skills/swarm-advanced/SKILL.md @@ -0,0 +1,973 @@ +--- +name: swarm-advanced +description: Advanced swarm orchestration patterns for research, development, testing, and complex distributed workflows +version: 2.0.0 +category: orchestration +tags: [swarm, distributed, parallel, research, testing, development, coordination] +author: Codex Flow Team +--- + +# Advanced Swarm Orchestration + +Master advanced swarm patterns for distributed research, development, and testing workflows. This skill covers comprehensive orchestration strategies using both MCP tools and CLI commands. + +## Quick Start + +### Prerequisites +```bash +# Ensure Codex Flow is installed +npm install -g Codex-flow@alpha + +# Add MCP server (if using MCP tools) +Codex mcp add Codex-flow npx Codex-flow@alpha mcp start +``` + +### Basic Pattern +```javascript +// 1. Initialize swarm topology +mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 6 }) + +// 2. Spawn specialized agents +mcp__claude-flow__agent_spawn({ type: "researcher", name: "Agent 1" }) + +// 3. 
Orchestrate tasks +mcp__claude-flow__task_orchestrate({ task: "...", strategy: "parallel" }) +``` + +## Core Concepts + +### Swarm Topologies + +**Mesh Topology** - Peer-to-peer communication, best for research and analysis +- All agents communicate directly +- High flexibility and resilience +- Use for: Research, analysis, brainstorming + +**Hierarchical Topology** - Coordinator with subordinates, best for development +- Clear command structure +- Sequential workflow support +- Use for: Development, structured workflows + +**Star Topology** - Central coordinator, best for testing +- Centralized control and monitoring +- Parallel execution with coordination +- Use for: Testing, validation, quality assurance + +**Ring Topology** - Sequential processing chain +- Step-by-step processing +- Pipeline workflows +- Use for: Multi-stage processing, data pipelines + +### Agent Strategies + +**Adaptive** - Dynamic adjustment based on task complexity +**Balanced** - Equal distribution of work across agents +**Specialized** - Task-specific agent assignment +**Parallel** - Maximum concurrent execution + +## Pattern 1: Research Swarm + +### Purpose +Deep research through parallel information gathering, analysis, and synthesis. 
+ +### Architecture +```javascript +// Initialize research swarm +mcp__claude-flow__swarm_init({ + "topology": "mesh", + "maxAgents": 6, + "strategy": "adaptive" +}) + +// Spawn research team +const researchAgents = [ + { + type: "researcher", + name: "Web Researcher", + capabilities: ["web-search", "content-extraction", "source-validation"] + }, + { + type: "researcher", + name: "Academic Researcher", + capabilities: ["paper-analysis", "citation-tracking", "literature-review"] + }, + { + type: "analyst", + name: "Data Analyst", + capabilities: ["data-processing", "statistical-analysis", "visualization"] + }, + { + type: "analyst", + name: "Pattern Analyzer", + capabilities: ["trend-detection", "correlation-analysis", "outlier-detection"] + }, + { + type: "documenter", + name: "Report Writer", + capabilities: ["synthesis", "technical-writing", "formatting"] + } +] + +// Spawn all agents +researchAgents.forEach(agent => { + mcp__claude-flow__agent_spawn({ + type: agent.type, + name: agent.name, + capabilities: agent.capabilities + }) +}) +``` + +### Research Workflow + +#### Phase 1: Information Gathering +```javascript +// Parallel information collection +mcp__claude-flow__parallel_execute({ + "tasks": [ + { + "id": "web-search", + "command": "search recent publications and articles" + }, + { + "id": "academic-search", + "command": "search academic databases and papers" + }, + { + "id": "data-collection", + "command": "gather relevant datasets and statistics" + }, + { + "id": "expert-search", + "command": "identify domain experts and thought leaders" + } + ] +}) + +// Store research findings in memory +mcp__claude-flow__memory_usage({ + "action": "store", + "key": "research-findings-" + Date.now(), + "value": JSON.stringify(findings), + "namespace": "research", + "ttl": 604800 // 7 days +}) +``` + +#### Phase 2: Analysis and Validation +```javascript +// Pattern recognition in findings +mcp__claude-flow__pattern_recognize({ + "data": researchData, + "patterns": 
["trend", "correlation", "outlier", "emerging-pattern"] +}) + +// Cognitive analysis +mcp__claude-flow__cognitive_analyze({ + "behavior": "research-synthesis" +}) + +// Quality assessment +mcp__claude-flow__quality_assess({ + "target": "research-sources", + "criteria": ["credibility", "relevance", "recency", "authority"] +}) + +// Cross-reference validation +mcp__claude-flow__neural_patterns({ + "action": "analyze", + "operation": "fact-checking", + "metadata": { "sources": sourcesArray } +}) +``` + +#### Phase 3: Knowledge Management +```javascript +// Search existing knowledge base +mcp__claude-flow__memory_search({ + "pattern": "topic X", + "namespace": "research", + "limit": 20 +}) + +// Create knowledge graph connections +mcp__claude-flow__neural_patterns({ + "action": "learn", + "operation": "knowledge-graph", + "metadata": { + "topic": "X", + "connections": relatedTopics, + "depth": 3 + } +}) + +// Store connections for future use +mcp__claude-flow__memory_usage({ + "action": "store", + "key": "knowledge-graph-X", + "value": JSON.stringify(knowledgeGraph), + "namespace": "research/graphs", + "ttl": 2592000 // 30 days +}) +``` + +#### Phase 4: Report Generation +```javascript +// Orchestrate report generation +mcp__claude-flow__task_orchestrate({ + "task": "generate comprehensive research report", + "strategy": "sequential", + "priority": "high", + "dependencies": ["gather", "analyze", "validate", "synthesize"] +}) + +// Monitor research progress +mcp__claude-flow__swarm_status({ + "swarmId": "research-swarm" +}) + +// Generate final report +mcp__claude-flow__workflow_execute({ + "workflowId": "research-report-generation", + "params": { + "findings": findings, + "format": "comprehensive", + "sections": ["executive-summary", "methodology", "findings", "analysis", "conclusions", "references"] + } +}) +``` + +### CLI Fallback +```bash +# Quick research swarm +npx Codex-flow swarm "research AI trends in 2025" \ + --strategy research \ + --mode distributed \ + 
--max-agents 6 \ + --parallel \ + --output research-report.md +``` + +## Pattern 2: Development Swarm + +### Purpose +Full-stack development through coordinated specialist agents. + +### Architecture +```javascript +// Initialize development swarm with hierarchy +mcp__claude-flow__swarm_init({ + "topology": "hierarchical", + "maxAgents": 8, + "strategy": "balanced" +}) + +// Spawn development team +const devTeam = [ + { type: "architect", name: "System Architect", role: "coordinator" }, + { type: "coder", name: "Backend Developer", capabilities: ["node", "api", "database"] }, + { type: "coder", name: "Frontend Developer", capabilities: ["react", "ui", "ux"] }, + { type: "coder", name: "Database Engineer", capabilities: ["sql", "nosql", "optimization"] }, + { type: "tester", name: "QA Engineer", capabilities: ["unit", "integration", "e2e"] }, + { type: "reviewer", name: "Code Reviewer", capabilities: ["security", "performance", "best-practices"] }, + { type: "documenter", name: "Technical Writer", capabilities: ["api-docs", "guides", "tutorials"] }, + { type: "monitor", name: "DevOps Engineer", capabilities: ["ci-cd", "deployment", "monitoring"] } +] + +// Spawn all team members +devTeam.forEach(member => { + mcp__claude-flow__agent_spawn({ + type: member.type, + name: member.name, + capabilities: member.capabilities, + swarmId: "dev-swarm" + }) +}) +``` + +### Development Workflow + +#### Phase 1: Architecture and Design +```javascript +// System architecture design +mcp__claude-flow__task_orchestrate({ + "task": "design system architecture for REST API", + "strategy": "sequential", + "priority": "critical", + "assignTo": "System Architect" +}) + +// Store architecture decisions +mcp__claude-flow__memory_usage({ + "action": "store", + "key": "architecture-decisions", + "value": JSON.stringify(architectureDoc), + "namespace": "development/design" +}) +``` + +#### Phase 2: Parallel Implementation +```javascript +// Parallel development tasks 
+mcp__claude-flow__parallel_execute({ + "tasks": [ + { + "id": "backend-api", + "command": "implement REST API endpoints", + "assignTo": "Backend Developer" + }, + { + "id": "frontend-ui", + "command": "build user interface components", + "assignTo": "Frontend Developer" + }, + { + "id": "database-schema", + "command": "design and implement database schema", + "assignTo": "Database Engineer" + }, + { + "id": "api-documentation", + "command": "create API documentation", + "assignTo": "Technical Writer" + } + ] +}) + +// Monitor development progress +mcp__claude-flow__swarm_monitor({ + "swarmId": "dev-swarm", + "interval": 5000 +}) +``` + +#### Phase 3: Testing and Validation +```javascript +// Comprehensive testing +mcp__claude-flow__batch_process({ + "items": [ + { type: "unit", target: "all-modules" }, + { type: "integration", target: "api-endpoints" }, + { type: "e2e", target: "user-flows" }, + { type: "performance", target: "critical-paths" } + ], + "operation": "execute-tests" +}) + +// Quality assessment +mcp__claude-flow__quality_assess({ + "target": "codebase", + "criteria": ["coverage", "complexity", "maintainability", "security"] +}) +``` + +#### Phase 4: Review and Deployment +```javascript +// Code review workflow +mcp__claude-flow__workflow_execute({ + "workflowId": "code-review-process", + "params": { + "reviewers": ["Code Reviewer"], + "criteria": ["security", "performance", "best-practices"] + } +}) + +// CI/CD pipeline +mcp__claude-flow__pipeline_create({ + "config": { + "stages": ["build", "test", "security-scan", "deploy"], + "environment": "production" + } +}) +``` + +### CLI Fallback +```bash +# Quick development swarm +npx Codex-flow swarm "build REST API with authentication" \ + --strategy development \ + --mode hierarchical \ + --monitor \ + --output sqlite +``` + +## Pattern 3: Testing Swarm + +### Purpose +Comprehensive quality assurance through distributed testing. 
+ +### Architecture +```javascript +// Initialize testing swarm with star topology +mcp__claude-flow__swarm_init({ + "topology": "star", + "maxAgents": 7, + "strategy": "parallel" +}) + +// Spawn testing team +const testingTeam = [ + { + type: "tester", + name: "Unit Test Coordinator", + capabilities: ["unit-testing", "mocking", "coverage", "tdd"] + }, + { + type: "tester", + name: "Integration Tester", + capabilities: ["integration", "api-testing", "contract-testing"] + }, + { + type: "tester", + name: "E2E Tester", + capabilities: ["e2e", "ui-testing", "user-flows", "selenium"] + }, + { + type: "tester", + name: "Performance Tester", + capabilities: ["load-testing", "stress-testing", "benchmarking"] + }, + { + type: "monitor", + name: "Security Tester", + capabilities: ["security-testing", "penetration-testing", "vulnerability-scanning"] + }, + { + type: "analyst", + name: "Test Analyst", + capabilities: ["coverage-analysis", "test-optimization", "reporting"] + }, + { + type: "documenter", + name: "Test Documenter", + capabilities: ["test-documentation", "test-plans", "reports"] + } +] + +// Spawn all testers +testingTeam.forEach(tester => { + mcp__claude-flow__agent_spawn({ + type: tester.type, + name: tester.name, + capabilities: tester.capabilities, + swarmId: "testing-swarm" + }) +}) +``` + +### Testing Workflow + +#### Phase 1: Test Planning +```javascript +// Analyze test coverage requirements +mcp__claude-flow__quality_assess({ + "target": "test-coverage", + "criteria": [ + "line-coverage", + "branch-coverage", + "function-coverage", + "edge-cases" + ] +}) + +// Identify test scenarios +mcp__claude-flow__pattern_recognize({ + "data": testScenarios, + "patterns": [ + "edge-case", + "boundary-condition", + "error-path", + "happy-path" + ] +}) + +// Store test plan +mcp__claude-flow__memory_usage({ + "action": "store", + "key": "test-plan-" + Date.now(), + "value": JSON.stringify(testPlan), + "namespace": "testing/plans" +}) +``` + +#### Phase 2: Parallel 
Test Execution +```javascript +// Execute all test suites in parallel +mcp__claude-flow__parallel_execute({ + "tasks": [ + { + "id": "unit-tests", + "command": "npm run test:unit", + "assignTo": "Unit Test Coordinator" + }, + { + "id": "integration-tests", + "command": "npm run test:integration", + "assignTo": "Integration Tester" + }, + { + "id": "e2e-tests", + "command": "npm run test:e2e", + "assignTo": "E2E Tester" + }, + { + "id": "performance-tests", + "command": "npm run test:performance", + "assignTo": "Performance Tester" + }, + { + "id": "security-tests", + "command": "npm run test:security", + "assignTo": "Security Tester" + } + ] +}) + +// Batch process test suites +mcp__claude-flow__batch_process({ + "items": testSuites, + "operation": "execute-test-suite" +}) +``` + +#### Phase 3: Performance and Security +```javascript +// Run performance benchmarks +mcp__claude-flow__benchmark_run({ + "suite": "comprehensive-performance" +}) + +// Bottleneck analysis +mcp__claude-flow__bottleneck_analyze({ + "component": "application", + "metrics": ["response-time", "throughput", "memory", "cpu"] +}) + +// Security scanning +mcp__claude-flow__security_scan({ + "target": "application", + "depth": "comprehensive" +}) + +// Vulnerability analysis +mcp__claude-flow__error_analysis({ + "logs": securityScanLogs +}) +``` + +#### Phase 4: Monitoring and Reporting +```javascript +// Real-time test monitoring +mcp__claude-flow__swarm_monitor({ + "swarmId": "testing-swarm", + "interval": 2000 +}) + +// Generate comprehensive test report +mcp__claude-flow__performance_report({ + "format": "detailed", + "timeframe": "current-run" +}) + +// Get test results +mcp__claude-flow__task_results({ + "taskId": "test-execution-001" +}) + +// Trend analysis +mcp__claude-flow__trend_analysis({ + "metric": "test-coverage", + "period": "30d" +}) +``` + +### CLI Fallback +```bash +# Quick testing swarm +npx Codex-flow swarm "test application comprehensively" \ + --strategy testing \ + --mode 
star \ + --parallel \ + --timeout 600 +``` + +## Pattern 4: Analysis Swarm + +### Purpose +Deep code and system analysis through specialized analyzers. + +### Architecture +```javascript +// Initialize analysis swarm +mcp__claude-flow__swarm_init({ + "topology": "mesh", + "maxAgents": 5, + "strategy": "adaptive" +}) + +// Spawn analysis specialists +const analysisTeam = [ + { + type: "analyst", + name: "Code Analyzer", + capabilities: ["static-analysis", "complexity-analysis", "dead-code-detection"] + }, + { + type: "analyst", + name: "Security Analyzer", + capabilities: ["security-scan", "vulnerability-detection", "dependency-audit"] + }, + { + type: "analyst", + name: "Performance Analyzer", + capabilities: ["profiling", "bottleneck-detection", "optimization"] + }, + { + type: "analyst", + name: "Architecture Analyzer", + capabilities: ["dependency-analysis", "coupling-detection", "modularity-assessment"] + }, + { + type: "documenter", + name: "Analysis Reporter", + capabilities: ["reporting", "visualization", "recommendations"] + } +] + +// Spawn all analysts +analysisTeam.forEach(analyst => { + mcp__claude-flow__agent_spawn({ + type: analyst.type, + name: analyst.name, + capabilities: analyst.capabilities + }) +}) +``` + +### Analysis Workflow +```javascript +// Parallel analysis execution +mcp__claude-flow__parallel_execute({ + "tasks": [ + { "id": "analyze-code", "command": "analyze codebase structure and quality" }, + { "id": "analyze-security", "command": "scan for security vulnerabilities" }, + { "id": "analyze-performance", "command": "identify performance bottlenecks" }, + { "id": "analyze-architecture", "command": "assess architectural patterns" } + ] +}) + +// Generate comprehensive analysis report +mcp__claude-flow__performance_report({ + "format": "detailed", + "timeframe": "current" +}) + +// Cost analysis +mcp__claude-flow__cost_analysis({ + "timeframe": "30d" +}) +``` + +## Advanced Techniques + +### Error Handling and Fault Tolerance + 
+```javascript +// Setup fault tolerance for all agents +mcp__claude-flow__daa_fault_tolerance({ + "agentId": "all", + "strategy": "auto-recovery" +}) + +// Error handling pattern +try { + await mcp__claude-flow__task_orchestrate({ + "task": "complex operation", + "strategy": "parallel", + "priority": "high" + }) +} catch (error) { + // Check swarm health + const status = await mcp__claude-flow__swarm_status({}) + + // Analyze error patterns + await mcp__claude-flow__error_analysis({ + "logs": [error.message] + }) + + // Auto-recovery attempt + if (status.healthy) { + await mcp__claude-flow__task_orchestrate({ + "task": "retry failed operation", + "strategy": "sequential" + }) + } +} +``` + +### Memory and State Management + +```javascript +// Cross-session persistence +mcp__claude-flow__memory_persist({ + "sessionId": "swarm-session-001" +}) + +// Namespace management for different swarms +mcp__claude-flow__memory_namespace({ + "namespace": "research-swarm", + "action": "create" +}) + +// Create state snapshot +mcp__claude-flow__state_snapshot({ + "name": "development-checkpoint-1" +}) + +// Restore from snapshot if needed +mcp__claude-flow__context_restore({ + "snapshotId": "development-checkpoint-1" +}) + +// Backup memory stores +mcp__claude-flow__memory_backup({ + "path": "/workspaces/Codex-flow/backups/swarm-memory.json" +}) +``` + +### Neural Pattern Learning + +```javascript +// Train neural patterns from successful workflows +mcp__claude-flow__neural_train({ + "pattern_type": "coordination", + "training_data": JSON.stringify(successfulWorkflows), + "epochs": 50 +}) + +// Adaptive learning from experience +mcp__claude-flow__learning_adapt({ + "experience": { + "workflow": "research-to-report", + "success": true, + "duration": 3600, + "quality": 0.95 + } +}) + +// Pattern recognition for optimization +mcp__claude-flow__pattern_recognize({ + "data": workflowMetrics, + "patterns": ["bottleneck", "optimization-opportunity", "efficiency-gain"] +}) +``` + +### 
Workflow Automation + +```javascript +// Create reusable workflow +mcp__claude-flow__workflow_create({ + "name": "full-stack-development", + "steps": [ + { "phase": "design", "agents": ["architect"] }, + { "phase": "implement", "agents": ["backend-dev", "frontend-dev"], "parallel": true }, + { "phase": "test", "agents": ["tester", "security-tester"], "parallel": true }, + { "phase": "review", "agents": ["reviewer"] }, + { "phase": "deploy", "agents": ["devops"] } + ], + "triggers": ["on-commit", "scheduled-daily"] +}) + +// Setup automation rules +mcp__claude-flow__automation_setup({ + "rules": [ + { + "trigger": "file-changed", + "pattern": "*.js", + "action": "run-tests" + }, + { + "trigger": "PR-created", + "action": "code-review-swarm" + } + ] +}) + +// Event-driven triggers +mcp__claude-flow__trigger_setup({ + "events": ["code-commit", "PR-merge", "deployment"], + "actions": ["test", "analyze", "document"] +}) +``` + +### Performance Optimization + +```javascript +// Topology optimization +mcp__claude-flow__topology_optimize({ + "swarmId": "current-swarm" +}) + +// Load balancing +mcp__claude-flow__load_balance({ + "swarmId": "development-swarm", + "tasks": taskQueue +}) + +// Agent coordination sync +mcp__claude-flow__coordination_sync({ + "swarmId": "development-swarm" +}) + +// Auto-scaling +mcp__claude-flow__swarm_scale({ + "swarmId": "development-swarm", + "targetSize": 12 +}) +``` + +### Monitoring and Metrics + +```javascript +// Real-time swarm monitoring +mcp__claude-flow__swarm_monitor({ + "swarmId": "active-swarm", + "interval": 3000 +}) + +// Collect comprehensive metrics +mcp__claude-flow__metrics_collect({ + "components": ["agents", "tasks", "memory", "performance"] +}) + +// Health monitoring +mcp__claude-flow__health_check({ + "components": ["swarm", "agents", "neural", "memory"] +}) + +// Usage statistics +mcp__claude-flow__usage_stats({ + "component": "swarm-orchestration" +}) + +// Trend analysis +mcp__claude-flow__trend_analysis({ + 
"metric": "agent-performance", + "period": "7d" +}) +``` + +## Best Practices + +### 1. Choosing the Right Topology + +- **Mesh**: Research, brainstorming, collaborative analysis +- **Hierarchical**: Structured development, sequential workflows +- **Star**: Testing, validation, centralized coordination +- **Ring**: Pipeline processing, staged workflows + +### 2. Agent Specialization + +- Assign specific capabilities to each agent +- Avoid overlapping responsibilities +- Use coordination agents for complex workflows +- Leverage memory for agent communication + +### 3. Parallel Execution + +- Identify independent tasks for parallelization +- Use sequential execution for dependent tasks +- Monitor resource usage during parallel execution +- Implement proper error handling + +### 4. Memory Management + +- Use namespaces to organize memory +- Set appropriate TTL values +- Create regular backups +- Implement state snapshots for checkpoints + +### 5. Monitoring and Optimization + +- Monitor swarm health regularly +- Collect and analyze metrics +- Optimize topology based on performance +- Use neural patterns to learn from success + +### 6. 
Error Recovery + +- Implement fault tolerance strategies +- Use auto-recovery mechanisms +- Analyze error patterns +- Create fallback workflows + +## Real-World Examples + +### Example 1: AI Research Project +```javascript +// Research AI trends, analyze findings, generate report +mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 6 }) +// Spawn: 2 researchers, 2 analysts, 1 synthesizer, 1 documenter +// Parallel gather → Analyze patterns → Synthesize → Report +``` + +### Example 2: Full-Stack Application +```javascript +// Build complete web application with testing +mcp__claude-flow__swarm_init({ topology: "hierarchical", maxAgents: 8 }) +// Spawn: 1 architect, 2 devs, 1 db engineer, 2 testers, 1 reviewer, 1 devops +// Design → Parallel implement → Test → Review → Deploy +``` + +### Example 3: Security Audit +```javascript +// Comprehensive security analysis +mcp__claude-flow__swarm_init({ topology: "star", maxAgents: 5 }) +// Spawn: 1 coordinator, 1 code analyzer, 1 security scanner, 1 penetration tester, 1 reporter +// Parallel scan → Vulnerability analysis → Penetration test → Report +``` + +### Example 4: Performance Optimization +```javascript +// Identify and fix performance bottlenecks +mcp__claude-flow__swarm_init({ topology: "mesh", maxAgents: 4 }) +// Spawn: 1 profiler, 1 bottleneck analyzer, 1 optimizer, 1 tester +// Profile → Identify bottlenecks → Optimize → Validate +``` + +## Troubleshooting + +### Common Issues + +**Issue**: Swarm agents not coordinating properly +**Solution**: Check topology selection, verify memory usage, enable monitoring + +**Issue**: Parallel execution failing +**Solution**: Verify task dependencies, check resource limits, implement error handling + +**Issue**: Memory persistence not working +**Solution**: Verify namespaces, check TTL settings, ensure backup configuration + +**Issue**: Performance degradation +**Solution**: Optimize topology, reduce agent count, analyze bottlenecks + +## Related Skills + +- 
`sparc-methodology` - Systematic development workflow +- `github-integration` - Repository management and automation +- `neural-patterns` - AI-powered coordination optimization +- `memory-management` - Cross-session state persistence + +## References + +- [Codex Flow Documentation](https://github.com/ruvnet/Codex-flow) +- [Swarm Orchestration Guide](https://github.com/ruvnet/Codex-flow/wiki/swarm) +- [MCP Tools Reference](https://github.com/ruvnet/Codex-flow/wiki/mcp) +- [Performance Optimization](https://github.com/ruvnet/Codex-flow/wiki/performance) + +--- + +**Version**: 2.0.0 +**Last Updated**: 2025-10-19 +**Skill Level**: Advanced +**Estimated Learning Time**: 2-3 hours diff --git a/.agents/skills/v3-cli-modernization/SKILL.md b/.agents/skills/v3-cli-modernization/SKILL.md new file mode 100644 index 0000000..1ba5f35 --- /dev/null +++ b/.agents/skills/v3-cli-modernization/SKILL.md @@ -0,0 +1,872 @@ +--- +name: "V3 CLI Modernization" +description: "CLI modernization and hooks system enhancement for Codex-flow v3. Implements interactive prompts, command decomposition, enhanced hooks integration, and intelligent workflow automation." +--- + +# V3 CLI Modernization + +## What This Skill Does + +Modernizes Codex-flow v3 CLI with interactive prompts, intelligent command decomposition, enhanced hooks integration, performance optimization, and comprehensive workflow automation capabilities. 
+ +## Quick Start + +```bash +# Initialize CLI modernization analysis +Task("CLI architecture", "Analyze current CLI structure and identify optimization opportunities", "cli-hooks-developer") + +# Modernization implementation (parallel) +Task("Command decomposition", "Break down large CLI files into focused modules", "cli-hooks-developer") +Task("Interactive prompts", "Implement intelligent interactive CLI experience", "cli-hooks-developer") +Task("Hooks enhancement", "Deep integrate hooks with CLI lifecycle", "cli-hooks-developer") +``` + +## CLI Architecture Modernization + +### Current State Analysis +``` +Current CLI Issues: +├── index.ts: 108KB monolithic file +├── enterprise.ts: 68KB feature module +├── Limited interactivity: Basic command parsing +├── Hooks integration: Basic pre/post execution +└── No intelligent workflows: Manual command chaining + +Target Architecture: +├── Modular Commands: <500 lines per command +├── Interactive Prompts: Smart context-aware UX +├── Enhanced Hooks: Deep lifecycle integration +├── Workflow Automation: Intelligent command orchestration +└── Performance: <200ms command response time +``` + +### Modular Command Architecture +```typescript +// src/cli/core/command-registry.ts +interface CommandModule { + name: string; + description: string; + category: CommandCategory; + handler: CommandHandler; + middleware: MiddlewareStack; + permissions: Permission[]; + examples: CommandExample[]; +} + +export class ModularCommandRegistry { + private commands = new Map(); + private categories = new Map(); + private aliases = new Map(); + + registerCommand(command: CommandModule): void { + this.commands.set(command.name, command); + + // Register in category index + if (!this.categories.has(command.category)) { + this.categories.set(command.category, []); + } + this.categories.get(command.category)!.push(command); + } + + async executeCommand(name: string, args: string[]): Promise { + const command = this.resolveCommand(name); + if 
(!command) { + throw new CommandNotFoundError(name, this.getSuggestions(name)); + } + + // Execute middleware stack + const context = await this.buildExecutionContext(command, args); + const result = await command.middleware.execute(context); + + return result; + } + + private resolveCommand(name: string): CommandModule | undefined { + // Try exact match first + if (this.commands.has(name)) { + return this.commands.get(name); + } + + // Try alias + const aliasTarget = this.aliases.get(name); + if (aliasTarget) { + return this.commands.get(aliasTarget); + } + + // Try fuzzy match + return this.findFuzzyMatch(name); + } +} +``` + +## Command Decomposition Strategy + +### Swarm Commands Module +```typescript +// src/cli/commands/swarm/swarm.command.ts +@Command({ + name: 'swarm', + description: 'Swarm coordination and management', + category: 'orchestration' +}) +export class SwarmCommand { + constructor( + private swarmCoordinator: UnifiedSwarmCoordinator, + private promptService: InteractivePromptService + ) {} + + @SubCommand('init') + @Option('--topology', 'Swarm topology (mesh|hierarchical|adaptive)', 'hierarchical') + @Option('--agents', 'Number of agents to spawn', 5) + @Option('--interactive', 'Interactive agent configuration', false) + async init( + @Arg('projectName') projectName: string, + options: SwarmInitOptions + ): Promise { + + if (options.interactive) { + return this.interactiveSwarmInit(projectName); + } + + return this.quickSwarmInit(projectName, options); + } + + private async interactiveSwarmInit(projectName: string): Promise { + console.log(`🚀 Initializing Swarm for ${projectName}`); + + // Interactive topology selection + const topology = await this.promptService.select({ + message: 'Select swarm topology:', + choices: [ + { name: 'Hierarchical (Queen-led coordination)', value: 'hierarchical' }, + { name: 'Mesh (Peer-to-peer collaboration)', value: 'mesh' }, + { name: 'Adaptive (Dynamic topology switching)', value: 'adaptive' } + ] + }); + + // 
Agent configuration + const agents = await this.promptAgentConfiguration(); + + // Initialize with configuration + const swarm = await this.swarmCoordinator.initialize({ + name: projectName, + topology, + agents, + hooks: { + onAgentSpawn: this.handleAgentSpawn.bind(this), + onTaskComplete: this.handleTaskComplete.bind(this), + onSwarmComplete: this.handleSwarmComplete.bind(this) + } + }); + + return CommandResult.success({ + message: `✅ Swarm ${projectName} initialized with ${agents.length} agents`, + data: { swarmId: swarm.id, topology, agentCount: agents.length } + }); + } + + @SubCommand('status') + async status(): Promise { + const swarms = await this.swarmCoordinator.listActiveSwarms(); + + if (swarms.length === 0) { + return CommandResult.info('No active swarms found'); + } + + // Interactive swarm selection if multiple + const selectedSwarm = swarms.length === 1 + ? swarms[0] + : await this.promptService.select({ + message: 'Select swarm to inspect:', + choices: swarms.map(s => ({ + name: `${s.name} (${s.agents.length} agents, ${s.topology})`, + value: s + })) + }); + + return this.displaySwarmStatus(selectedSwarm); + } +} +``` + +### Learning Commands Module +```typescript +// src/cli/commands/learning/learning.command.ts +@Command({ + name: 'learning', + description: 'Learning system management and optimization', + category: 'intelligence' +}) +export class LearningCommand { + constructor( + private learningService: IntegratedLearningService, + private promptService: InteractivePromptService + ) {} + + @SubCommand('start') + @Option('--algorithm', 'RL algorithm to use', 'auto') + @Option('--tier', 'Learning tier (basic|standard|advanced)', 'standard') + async start(options: LearningStartOptions): Promise { + // Auto-detect optimal algorithm if not specified + if (options.algorithm === 'auto') { + const taskContext = await this.analyzeCurrentContext(); + options.algorithm = this.learningService.selectOptimalAlgorithm(taskContext); + + console.log(`🧠 
Auto-selected ${options.algorithm} algorithm based on context`); + } + + const session = await this.learningService.startSession({ + algorithm: options.algorithm, + tier: options.tier, + userId: await this.getCurrentUser() + }); + + return CommandResult.success({ + message: `🚀 Learning session started with ${options.algorithm}`, + data: { sessionId: session.id, algorithm: options.algorithm, tier: options.tier } + }); + } + + @SubCommand('feedback') + @Arg('reward', 'Reward value (0-1)', 'number') + async feedback( + @Arg('reward') reward: number, + @Option('--context', 'Additional context for learning') + context?: string + ): Promise { + const activeSession = await this.learningService.getActiveSession(); + if (!activeSession) { + return CommandResult.error('No active learning session found. Start one with `learning start`'); + } + + await this.learningService.submitFeedback({ + sessionId: activeSession.id, + reward, + context, + timestamp: new Date() + }); + + return CommandResult.success({ + message: `📊 Feedback recorded (reward: ${reward})`, + data: { reward, sessionId: activeSession.id } + }); + } + + @SubCommand('metrics') + async metrics(): Promise { + const metrics = await this.learningService.getMetrics(); + + // Interactive metrics display + await this.displayInteractiveMetrics(metrics); + + return CommandResult.success('Metrics displayed'); + } +} +``` + +## Interactive Prompt System + +### Advanced Prompt Service +```typescript +// src/cli/services/interactive-prompt.service.ts +interface PromptOptions { + message: string; + type: 'select' | 'multiselect' | 'input' | 'confirm' | 'progress'; + choices?: PromptChoice[]; + default?: any; + validate?: (input: any) => boolean | string; + transform?: (input: any) => any; +} + +export class InteractivePromptService { + private inquirer: any; // Dynamic import for tree-shaking + + async select(options: SelectPromptOptions): Promise { + const { default: inquirer } = await import('inquirer'); + + const result = 
await inquirer.prompt([{ + type: 'list', + name: 'selection', + message: options.message, + choices: options.choices, + default: options.default + }]); + + return result.selection; + } + + async multiSelect<T>(options: MultiSelectPromptOptions): Promise<T[]> { + const { default: inquirer } = await import('inquirer'); + + const result = await inquirer.prompt([{ + type: 'checkbox', + name: 'selections', + message: options.message, + choices: options.choices, + validate: (input: T[]) => { + if (options.minSelections && input.length < options.minSelections) { + return `Please select at least ${options.minSelections} options`; + } + if (options.maxSelections && input.length > options.maxSelections) { + return `Please select at most ${options.maxSelections} options`; + } + return true; + } + }]); + + return result.selections; + } + + async input(options: InputPromptOptions): Promise<string> { + const { default: inquirer } = await import('inquirer'); + + const result = await inquirer.prompt([{ + type: 'input', + name: 'input', + message: options.message, + default: options.default, + validate: options.validate, + transformer: options.transform + }]); + + return result.input; + } + + async progressTask<T>( + task: ProgressTask<T>, + options: ProgressOptions + ): Promise<T> { + const { default: cliProgress } = await import('cli-progress'); + + const progressBar = new cliProgress.SingleBar({ + format: `${options.title} |{bar}| {percentage}% | {status}`, + barCompleteChar: '█', + barIncompleteChar: '░', + hideCursor: true + }); + + progressBar.start(100, 0, { status: 'Starting...' }); + + try { + const result = await task({ + updateProgress: (percent: number, status?: string) => { + progressBar.update(percent, { status: status || 'Processing...' }); + } + }); + + progressBar.update(100, { status: 'Complete!' 
}); + progressBar.stop(); + + return result; + } catch (error) { + progressBar.stop(); + throw error; + } + } + + async confirmWithDetails( + message: string, + details: ConfirmationDetails + ): Promise { + console.log('\n' + chalk.bold(message)); + console.log(chalk.gray('Details:')); + + for (const [key, value] of Object.entries(details)) { + console.log(chalk.gray(` ${key}: ${value}`)); + } + + return this.confirm('\nProceed?'); + } +} +``` + +## Enhanced Hooks Integration + +### Deep CLI Hooks Integration +```typescript +// src/cli/hooks/cli-hooks-manager.ts +interface CLIHookEvent { + type: 'command_start' | 'command_end' | 'command_error' | 'agent_spawn' | 'task_complete'; + command: string; + args: string[]; + context: ExecutionContext; + timestamp: Date; +} + +export class CLIHooksManager { + private hooks: Map = new Map(); + private learningIntegration: LearningHooksIntegration; + + constructor() { + this.learningIntegration = new LearningHooksIntegration(); + this.setupDefaultHooks(); + } + + private setupDefaultHooks(): void { + // Learning integration hooks + this.registerHook('command_start', async (event: CLIHookEvent) => { + await this.learningIntegration.recordCommandStart(event); + }); + + this.registerHook('command_end', async (event: CLIHookEvent) => { + await this.learningIntegration.recordCommandSuccess(event); + }); + + this.registerHook('command_error', async (event: CLIHookEvent) => { + await this.learningIntegration.recordCommandError(event); + }); + + // Intelligent suggestions + this.registerHook('command_start', async (event: CLIHookEvent) => { + const suggestions = await this.generateIntelligentSuggestions(event); + if (suggestions.length > 0) { + this.displaySuggestions(suggestions); + } + }); + + // Performance monitoring + this.registerHook('command_end', async (event: CLIHookEvent) => { + await this.recordPerformanceMetrics(event); + }); + } + + async executeHooks(type: string, event: CLIHookEvent): Promise { + const handlers = 
this.hooks.get(type) || []; + + await Promise.all(handlers.map(handler => + this.executeHookSafely(handler, event) + )); + } + + private async generateIntelligentSuggestions(event: CLIHookEvent): Promise { + const context = await this.learningIntegration.getExecutionContext(event); + const patterns = await this.learningIntegration.findSimilarPatterns(context); + + return patterns.map(pattern => ({ + type: 'optimization', + message: `Based on similar executions, consider: ${pattern.suggestion}`, + confidence: pattern.confidence + })); + } +} +``` + +### Learning Integration +```typescript +// src/cli/hooks/learning-hooks-integration.ts +export class LearningHooksIntegration { + constructor( + private agenticFlowHooks: AgenticFlowHooksClient, + private agentDBLearning: AgentDBLearningClient + ) {} + + async recordCommandStart(event: CLIHookEvent): Promise { + // Start trajectory tracking + await this.agenticFlowHooks.trajectoryStart({ + sessionId: event.context.sessionId, + command: event.command, + args: event.args, + context: event.context + }); + + // Record experience in AgentDB + await this.agentDBLearning.recordExperience({ + type: 'command_execution', + state: this.encodeCommandState(event), + action: event.command, + timestamp: event.timestamp + }); + } + + async recordCommandSuccess(event: CLIHookEvent): Promise { + const executionTime = Date.now() - event.timestamp.getTime(); + const reward = this.calculateReward(event, executionTime, true); + + // Complete trajectory + await this.agenticFlowHooks.trajectoryEnd({ + sessionId: event.context.sessionId, + success: true, + reward, + verdict: 'positive' + }); + + // Submit feedback to learning system + await this.agentDBLearning.submitFeedback({ + sessionId: event.context.learningSessionId, + reward, + success: true, + latencyMs: executionTime + }); + + // Store successful pattern + if (reward > 0.8) { + await this.agenticFlowHooks.storePattern({ + pattern: event.command, + solution: event.context.result, + 
confidence: reward + }); + } + } + + async recordCommandError(event: CLIHookEvent): Promise { + const executionTime = Date.now() - event.timestamp.getTime(); + const reward = this.calculateReward(event, executionTime, false); + + // Complete trajectory with error + await this.agenticFlowHooks.trajectoryEnd({ + sessionId: event.context.sessionId, + success: false, + reward, + verdict: 'negative', + error: event.context.error + }); + + // Learn from failure + await this.agentDBLearning.submitFeedback({ + sessionId: event.context.learningSessionId, + reward, + success: false, + latencyMs: executionTime, + error: event.context.error + }); + } + + private calculateReward(event: CLIHookEvent, executionTime: number, success: boolean): number { + if (!success) return 0; + + // Base reward for success + let reward = 0.5; + + // Performance bonus (faster execution) + const expectedTime = this.getExpectedExecutionTime(event.command); + if (executionTime < expectedTime) { + reward += 0.3 * (1 - executionTime / expectedTime); + } + + // Complexity bonus + const complexity = this.calculateCommandComplexity(event); + reward += complexity * 0.2; + + return Math.min(reward, 1.0); + } +} +``` + +## Intelligent Workflow Automation + +### Workflow Orchestrator +```typescript +// src/cli/workflows/workflow-orchestrator.ts +interface WorkflowStep { + id: string; + command: string; + args: string[]; + dependsOn: string[]; + condition?: WorkflowCondition; + retryPolicy?: RetryPolicy; +} + +export class WorkflowOrchestrator { + constructor( + private commandRegistry: ModularCommandRegistry, + private promptService: InteractivePromptService + ) {} + + async executeWorkflow(workflow: Workflow): Promise { + const context = new WorkflowExecutionContext(workflow); + + // Display workflow overview + await this.displayWorkflowOverview(workflow); + + const confirmed = await this.promptService.confirm( + 'Execute this workflow?' 
+ ); + + if (!confirmed) { + return WorkflowResult.cancelled(); + } + + // Execute steps + return this.promptService.progressTask( + async ({ updateProgress }) => { + const steps = this.sortStepsByDependencies(workflow.steps); + + for (let i = 0; i < steps.length; i++) { + const step = steps[i]; + updateProgress((i / steps.length) * 100, `Executing ${step.command}`); + + await this.executeStep(step, context); + } + + return WorkflowResult.success(context.getResults()); + }, + { title: `Workflow: ${workflow.name}` } + ); + } + + async generateWorkflowFromIntent(intent: string): Promise { + // Use learning system to generate workflow + const patterns = await this.findWorkflowPatterns(intent); + + if (patterns.length === 0) { + throw new Error('Could not generate workflow for intent'); + } + + // Select best pattern or let user choose + const selectedPattern = patterns.length === 1 + ? patterns[0] + : await this.promptService.select({ + message: 'Select workflow template:', + choices: patterns.map(p => ({ + name: `${p.name} (${p.confidence}% match)`, + value: p + })) + }); + + return this.customizeWorkflow(selectedPattern, intent); + } + + private async executeStep(step: WorkflowStep, context: WorkflowExecutionContext): Promise { + // Check conditions + if (step.condition && !this.evaluateCondition(step.condition, context)) { + context.skipStep(step.id, 'Condition not met'); + return; + } + + // Check dependencies + const missingDeps = step.dependsOn.filter(dep => !context.isStepCompleted(dep)); + if (missingDeps.length > 0) { + throw new WorkflowError(`Step ${step.id} has unmet dependencies: ${missingDeps.join(', ')}`); + } + + // Execute with retry policy + const retryPolicy = step.retryPolicy || { maxAttempts: 1 }; + let lastError: Error | null = null; + + for (let attempt = 1; attempt <= retryPolicy.maxAttempts; attempt++) { + try { + const result = await this.commandRegistry.executeCommand(step.command, step.args); + context.completeStep(step.id, result); + 
return; + } catch (error) { + lastError = error as Error; + + if (attempt < retryPolicy.maxAttempts) { + await this.delay(retryPolicy.backoffMs || 1000); + } + } + } + + throw new WorkflowError(`Step ${step.id} failed after ${retryPolicy.maxAttempts} attempts: ${lastError?.message}`); + } +} +``` + +## Performance Optimization + +### Command Performance Monitoring +```typescript +// src/cli/performance/command-performance.ts +export class CommandPerformanceMonitor { + private metrics = new Map(); + + async measureCommand( + commandName: string, + executor: () => Promise + ): Promise { + const start = performance.now(); + const memBefore = process.memoryUsage(); + + try { + const result = await executor(); + const end = performance.now(); + const memAfter = process.memoryUsage(); + + this.recordMetrics(commandName, { + executionTime: end - start, + memoryDelta: memAfter.heapUsed - memBefore.heapUsed, + success: true + }); + + return result; + } catch (error) { + const end = performance.now(); + + this.recordMetrics(commandName, { + executionTime: end - start, + memoryDelta: 0, + success: false, + error: error as Error + }); + + throw error; + } + } + + private recordMetrics(command: string, measurement: PerformanceMeasurement): void { + if (!this.metrics.has(command)) { + this.metrics.set(command, new CommandMetrics(command)); + } + + const metrics = this.metrics.get(command)!; + metrics.addMeasurement(measurement); + + // Alert if performance degrades + if (metrics.getP95ExecutionTime() > 5000) { // 5 seconds + console.warn(`⚠️ Command '${command}' is performing slowly (P95: ${metrics.getP95ExecutionTime()}ms)`); + } + } + + getCommandReport(command: string): PerformanceReport { + const metrics = this.metrics.get(command); + if (!metrics) { + throw new Error(`No metrics found for command: ${command}`); + } + + return { + command, + totalExecutions: metrics.getTotalExecutions(), + successRate: metrics.getSuccessRate(), + avgExecutionTime: 
metrics.getAverageExecutionTime(), + p95ExecutionTime: metrics.getP95ExecutionTime(), + avgMemoryUsage: metrics.getAverageMemoryUsage(), + recommendations: this.generateRecommendations(metrics) + }; + } +} +``` + +## Smart Auto-completion + +### Intelligent Command Completion +```typescript +// src/cli/completion/intelligent-completion.ts +export class IntelligentCompletion { + constructor( + private learningService: LearningService, + private commandRegistry: ModularCommandRegistry + ) {} + + async generateCompletions( + partial: string, + context: CompletionContext + ): Promise { + const completions: Completion[] = []; + + // 1. Exact command matches + const exactMatches = this.commandRegistry.findCommandsByPrefix(partial); + completions.push(...exactMatches.map(cmd => ({ + value: cmd.name, + description: cmd.description, + type: 'command', + confidence: 1.0 + }))); + + // 2. Learning-based suggestions + const learnedSuggestions = await this.learningService.suggestCommands( + partial, + context + ); + completions.push(...learnedSuggestions); + + // 3. 
Context-aware suggestions + const contextualSuggestions = await this.generateContextualSuggestions( + partial, + context + ); + completions.push(...contextualSuggestions); + + // Sort by confidence and relevance + return completions + .sort((a, b) => b.confidence - a.confidence) + .slice(0, 10); // Top 10 suggestions + } + + private async generateContextualSuggestions( + partial: string, + context: CompletionContext + ): Promise { + const suggestions: Completion[] = []; + + // If in git repository, suggest git-related commands + if (context.isGitRepository) { + if (partial.startsWith('git')) { + suggestions.push({ + value: 'git commit', + description: 'Create git commit with generated message', + type: 'workflow', + confidence: 0.8 + }); + } + } + + // If package.json exists, suggest npm commands + if (context.hasPackageJson) { + if (partial.startsWith('npm') || partial.startsWith('swarm')) { + suggestions.push({ + value: 'swarm init', + description: 'Initialize swarm for this project', + type: 'workflow', + confidence: 0.9 + }); + } + } + + return suggestions; + } +} +``` + +## Success Metrics + +### CLI Performance Targets +- [ ] **Command Response**: <200ms average command execution time +- [ ] **File Decomposition**: index.ts (108KB) → <10KB per command module +- [ ] **Interactive UX**: Smart prompts with context awareness +- [ ] **Hook Integration**: Deep lifecycle integration with learning +- [ ] **Workflow Automation**: Intelligent multi-step command orchestration +- [ ] **Auto-completion**: >90% accuracy for command suggestions + +### User Experience Improvements +```typescript +const cliImprovements = { + before: { + commandResponse: '~500ms', + interactivity: 'Basic command parsing', + workflows: 'Manual command chaining', + suggestions: 'Static help text' + }, + + after: { + commandResponse: '<200ms with caching', + interactivity: 'Smart context-aware prompts', + workflows: 'Automated multi-step execution', + suggestions: 'Learning-based intelligent 
completion' + } +}; +``` + +## Related V3 Skills + +- `v3-core-implementation` - Core domain integration +- `v3-memory-unification` - Memory-backed command caching +- `v3-swarm-coordination` - CLI swarm management integration +- `v3-performance-optimization` - CLI performance monitoring + +## Usage Examples + +### Complete CLI Modernization +```bash +# Full CLI modernization implementation +Task("CLI modernization implementation", + "Implement modular commands, interactive prompts, and intelligent workflows", + "cli-hooks-developer") +``` + +### Interactive Command Enhancement +```bash +# Enhanced interactive commands +Codex-flow swarm init --interactive +Codex-flow learning start --guided +Codex-flow workflow create --from-intent "setup new project" +``` \ No newline at end of file diff --git a/.agents/skills/v3-core-implementation/SKILL.md b/.agents/skills/v3-core-implementation/SKILL.md new file mode 100644 index 0000000..d822836 --- /dev/null +++ b/.agents/skills/v3-core-implementation/SKILL.md @@ -0,0 +1,797 @@ +--- +name: "V3 Core Implementation" +description: "Core module implementation for Codex-flow v3. Implements DDD domains, clean architecture patterns, dependency injection, and modular TypeScript codebase with comprehensive testing." +--- + +# V3 Core Implementation + +## What This Skill Does + +Implements the core TypeScript modules for Codex-flow v3 following Domain-Driven Design principles, clean architecture patterns, and modern TypeScript best practices with comprehensive test coverage. 
+ +## Quick Start + +```bash +# Initialize core implementation +Task("Core foundation", "Set up DDD domain structure and base classes", "core-implementer") + +# Domain implementation (parallel) +Task("Task domain", "Implement task management domain with entities and services", "core-implementer") +Task("Session domain", "Implement session management domain", "core-implementer") +Task("Health domain", "Implement health monitoring domain", "core-implementer") +``` + +## Core Implementation Architecture + +### Domain Structure +``` +src/ +├── core/ +│ ├── kernel/ # Microkernel pattern +│ │ ├── Codex-flow-kernel.ts +│ │ ├── domain-registry.ts +│ │ └── plugin-loader.ts +│ │ +│ ├── domains/ # DDD Bounded Contexts +│ │ ├── task-management/ +│ │ │ ├── entities/ +│ │ │ ├── value-objects/ +│ │ │ ├── services/ +│ │ │ ├── repositories/ +│ │ │ └── events/ +│ │ │ +│ │ ├── session-management/ +│ │ ├── health-monitoring/ +│ │ ├── lifecycle-management/ +│ │ └── event-coordination/ +│ │ +│ ├── shared/ # Shared kernel +│ │ ├── domain/ +│ │ │ ├── entity.ts +│ │ │ ├── value-object.ts +│ │ │ ├── domain-event.ts +│ │ │ └── aggregate-root.ts +│ │ │ +│ │ ├── infrastructure/ +│ │ │ ├── event-bus.ts +│ │ │ ├── dependency-container.ts +│ │ │ └── logger.ts +│ │ │ +│ │ └── types/ +│ │ ├── common.ts +│ │ ├── errors.ts +│ │ └── interfaces.ts +│ │ +│ └── application/ # Application services +│ ├── use-cases/ +│ ├── commands/ +│ ├── queries/ +│ └── handlers/ +``` + +## Base Domain Classes + +### Entity Base Class +```typescript +// src/core/shared/domain/entity.ts +export abstract class Entity<T> { + protected readonly _id: T; + private _domainEvents: DomainEvent[] = []; + + constructor(id: T) { + this._id = id; + } + + get id(): T { + return this._id; + } + + public equals(object?: Entity<T>): boolean { + if (object == null || object == undefined) { + return false; + } + + if (this === object) { + return true; + } + + if (!(object instanceof Entity)) { + return false; + } + + return this._id === 
object._id; + } + + protected addDomainEvent(domainEvent: DomainEvent): void { + this._domainEvents.push(domainEvent); + } + + public getUncommittedEvents(): DomainEvent[] { + return this._domainEvents; + } + + public markEventsAsCommitted(): void { + this._domainEvents = []; + } +} +``` + +### Value Object Base Class +```typescript +// src/core/shared/domain/value-object.ts +export abstract class ValueObject<T> { + protected readonly props: T; + + constructor(props: T) { + this.props = Object.freeze(props); + } + + public equals(object?: ValueObject<T>): boolean { + if (object == null || object == undefined) { + return false; + } + + if (this === object) { + return true; + } + + return JSON.stringify(this.props) === JSON.stringify(object.props); + } + + get value(): T { + return this.props; + } +} +``` + +### Aggregate Root +```typescript +// src/core/shared/domain/aggregate-root.ts +export abstract class AggregateRoot<T> extends Entity<T> { + private _version: number = 0; + + get version(): number { + return this._version; + } + + protected incrementVersion(): void { + this._version++; + } + + public applyEvent(event: DomainEvent): void { + this.addDomainEvent(event); + this.incrementVersion(); + } +} +``` + +## Task Management Domain Implementation + +### Task Entity +```typescript +// src/core/domains/task-management/entities/task.entity.ts +import { AggregateRoot } from '../../../shared/domain/aggregate-root'; +import { TaskId } from '../value-objects/task-id.vo'; +import { TaskStatus } from '../value-objects/task-status.vo'; +import { Priority } from '../value-objects/priority.vo'; +import { TaskAssignedEvent } from '../events/task-assigned.event'; + +interface TaskProps { + id: TaskId; + description: string; + priority: Priority; + status: TaskStatus; + assignedAgentId?: string; + createdAt: Date; + updatedAt: Date; +} + +export class Task extends AggregateRoot<TaskId> { + private props: TaskProps; + + private constructor(props: TaskProps) { + super(props.id); + this.props = 
props; + } + + static create(description: string, priority: Priority): Task { + const task = new Task({ + id: TaskId.create(), + description, + priority, + status: TaskStatus.pending(), + createdAt: new Date(), + updatedAt: new Date() + }); + + return task; + } + + static reconstitute(props: TaskProps): Task { + return new Task(props); + } + + public assignTo(agentId: string): void { + if (this.props.status.equals(TaskStatus.completed())) { + throw new Error('Cannot assign completed task'); + } + + this.props.assignedAgentId = agentId; + this.props.status = TaskStatus.assigned(); + this.props.updatedAt = new Date(); + + this.applyEvent(new TaskAssignedEvent( + this.id.value, + agentId, + this.props.priority + )); + } + + public complete(result: TaskResult): void { + if (!this.props.assignedAgentId) { + throw new Error('Cannot complete unassigned task'); + } + + this.props.status = TaskStatus.completed(); + this.props.updatedAt = new Date(); + + this.applyEvent(new TaskCompletedEvent( + this.id.value, + result, + this.calculateDuration() + )); + } + + // Getters + get description(): string { return this.props.description; } + get priority(): Priority { return this.props.priority; } + get status(): TaskStatus { return this.props.status; } + get assignedAgentId(): string | undefined { return this.props.assignedAgentId; } + get createdAt(): Date { return this.props.createdAt; } + get updatedAt(): Date { return this.props.updatedAt; } + + private calculateDuration(): number { + return this.props.updatedAt.getTime() - this.props.createdAt.getTime(); + } +} +``` + +### Task Value Objects +```typescript +// src/core/domains/task-management/value-objects/task-id.vo.ts +export class TaskId extends ValueObject { + private constructor(value: string) { + super({ value }); + } + + static create(): TaskId { + return new TaskId(crypto.randomUUID()); + } + + static fromString(id: string): TaskId { + if (!id || id.length === 0) { + throw new Error('TaskId cannot be empty'); + } + 
return new TaskId(id); + } + + get value(): string { + return this.props.value; + } +} + +// src/core/domains/task-management/value-objects/task-status.vo.ts +type TaskStatusType = 'pending' | 'assigned' | 'in_progress' | 'completed' | 'failed'; + +export class TaskStatus extends ValueObject { + private constructor(status: TaskStatusType) { + super({ value: status }); + } + + static pending(): TaskStatus { return new TaskStatus('pending'); } + static assigned(): TaskStatus { return new TaskStatus('assigned'); } + static inProgress(): TaskStatus { return new TaskStatus('in_progress'); } + static completed(): TaskStatus { return new TaskStatus('completed'); } + static failed(): TaskStatus { return new TaskStatus('failed'); } + + get value(): TaskStatusType { + return this.props.value; + } + + public isPending(): boolean { return this.value === 'pending'; } + public isAssigned(): boolean { return this.value === 'assigned'; } + public isInProgress(): boolean { return this.value === 'in_progress'; } + public isCompleted(): boolean { return this.value === 'completed'; } + public isFailed(): boolean { return this.value === 'failed'; } +} + +// src/core/domains/task-management/value-objects/priority.vo.ts +type PriorityLevel = 'low' | 'medium' | 'high' | 'critical'; + +export class Priority extends ValueObject { + private constructor(level: PriorityLevel) { + super({ value: level }); + } + + static low(): Priority { return new Priority('low'); } + static medium(): Priority { return new Priority('medium'); } + static high(): Priority { return new Priority('high'); } + static critical(): Priority { return new Priority('critical'); } + + get value(): PriorityLevel { + return this.props.value; + } + + public getNumericValue(): number { + const priorities = { low: 1, medium: 2, high: 3, critical: 4 }; + return priorities[this.value]; + } +} +``` + +## Domain Services + +### Task Scheduling Service +```typescript +// 
src/core/domains/task-management/services/task-scheduling.service.ts +import { Injectable } from '../../../shared/infrastructure/dependency-container'; +import { Task } from '../entities/task.entity'; +import { Priority } from '../value-objects/priority.vo'; + +@Injectable() +export class TaskSchedulingService { + public prioritizeTasks(tasks: Task[]): Task[] { + return tasks.sort((a, b) => + b.priority.getNumericValue() - a.priority.getNumericValue() + ); + } + + public canSchedule(task: Task, agentCapacity: number): boolean { + if (agentCapacity <= 0) return false; + + // Critical tasks always schedulable + if (task.priority.equals(Priority.critical())) return true; + + // Other logic based on capacity + return true; + } + + public calculateEstimatedDuration(task: Task): number { + // Simple heuristic - would use ML in real implementation + const baseTime = 300000; // 5 minutes + const priorityMultiplier = { + low: 0.5, + medium: 1.0, + high: 1.5, + critical: 2.0 + }; + + return baseTime * priorityMultiplier[task.priority.value]; + } +} +``` + +## Repository Interfaces & Implementations + +### Task Repository Interface +```typescript +// src/core/domains/task-management/repositories/task.repository.ts +export interface ITaskRepository { + save(task: Task): Promise; + findById(id: TaskId): Promise; + findByAgentId(agentId: string): Promise; + findByStatus(status: TaskStatus): Promise; + findPendingTasks(): Promise; + delete(id: TaskId): Promise; +} +``` + +### SQLite Implementation +```typescript +// src/core/domains/task-management/repositories/sqlite-task.repository.ts +@Injectable() +export class SqliteTaskRepository implements ITaskRepository { + constructor( + @Inject('Database') private db: Database, + @Inject('Logger') private logger: ILogger + ) {} + + async save(task: Task): Promise { + const sql = ` + INSERT OR REPLACE INTO tasks ( + id, description, priority, status, assigned_agent_id, created_at, updated_at + ) VALUES (?, ?, ?, ?, ?, ?, ?) 
+ `; + + await this.db.run(sql, [ + task.id.value, + task.description, + task.priority.value, + task.status.value, + task.assignedAgentId, + task.createdAt.toISOString(), + task.updatedAt.toISOString() + ]); + + this.logger.debug(`Task saved: ${task.id.value}`); + } + + async findById(id: TaskId): Promise { + const sql = 'SELECT * FROM tasks WHERE id = ?'; + const row = await this.db.get(sql, [id.value]); + + return row ? this.mapRowToTask(row) : null; + } + + async findPendingTasks(): Promise { + const sql = 'SELECT * FROM tasks WHERE status = ? ORDER BY priority DESC, created_at ASC'; + const rows = await this.db.all(sql, ['pending']); + + return rows.map(row => this.mapRowToTask(row)); + } + + private mapRowToTask(row: any): Task { + return Task.reconstitute({ + id: TaskId.fromString(row.id), + description: row.description, + priority: Priority.fromString(row.priority), + status: TaskStatus.fromString(row.status), + assignedAgentId: row.assigned_agent_id, + createdAt: new Date(row.created_at), + updatedAt: new Date(row.updated_at) + }); + } +} +``` + +## Application Layer + +### Use Case Implementation +```typescript +// src/core/application/use-cases/assign-task.use-case.ts +@Injectable() +export class AssignTaskUseCase { + constructor( + @Inject('TaskRepository') private taskRepository: ITaskRepository, + @Inject('AgentRepository') private agentRepository: IAgentRepository, + @Inject('DomainEventBus') private eventBus: DomainEventBus, + @Inject('Logger') private logger: ILogger + ) {} + + async execute(command: AssignTaskCommand): Promise { + try { + // 1. Validate command + await this.validateCommand(command); + + // 2. Load aggregates + const task = await this.taskRepository.findById(command.taskId); + if (!task) { + throw new TaskNotFoundError(command.taskId); + } + + const agent = await this.agentRepository.findById(command.agentId); + if (!agent) { + throw new AgentNotFoundError(command.agentId); + } + + // 3. 
Business logic + if (!agent.canAcceptTask(task)) { + throw new AgentCannotAcceptTaskError(command.agentId, command.taskId); + } + + task.assignTo(command.agentId); + agent.acceptTask(task.id); + + // 4. Persist changes + await Promise.all([ + this.taskRepository.save(task), + this.agentRepository.save(agent) + ]); + + // 5. Publish domain events + const events = [ + ...task.getUncommittedEvents(), + ...agent.getUncommittedEvents() + ]; + + for (const event of events) { + await this.eventBus.publish(event); + } + + task.markEventsAsCommitted(); + agent.markEventsAsCommitted(); + + // 6. Return result + this.logger.info(`Task ${command.taskId.value} assigned to agent ${command.agentId}`); + + return AssignTaskResult.success({ + taskId: task.id, + agentId: command.agentId, + assignedAt: new Date() + }); + + } catch (error) { + this.logger.error(`Failed to assign task ${command.taskId.value}:`, error); + return AssignTaskResult.failure(error); + } + } + + private async validateCommand(command: AssignTaskCommand): Promise { + if (!command.taskId) { + throw new ValidationError('Task ID is required'); + } + if (!command.agentId) { + throw new ValidationError('Agent ID is required'); + } + } +} +``` + +## Dependency Injection Setup + +### Container Configuration +```typescript +// src/core/shared/infrastructure/dependency-container.ts +import { Container } from 'inversify'; +import { TYPES } from './types'; + +export class DependencyContainer { + private container: Container; + + constructor() { + this.container = new Container(); + this.setupBindings(); + } + + private setupBindings(): void { + // Repositories + this.container.bind(TYPES.TaskRepository) + .to(SqliteTaskRepository) + .inSingletonScope(); + + this.container.bind(TYPES.AgentRepository) + .to(SqliteAgentRepository) + .inSingletonScope(); + + // Services + this.container.bind(TYPES.TaskSchedulingService) + .to(TaskSchedulingService) + .inSingletonScope(); + + // Use Cases + 
this.container.bind(TYPES.AssignTaskUseCase) + .to(AssignTaskUseCase) + .inSingletonScope(); + + // Infrastructure + this.container.bind(TYPES.Logger) + .to(ConsoleLogger) + .inSingletonScope(); + + this.container.bind(TYPES.DomainEventBus) + .to(InMemoryDomainEventBus) + .inSingletonScope(); + } + + get(serviceIdentifier: symbol): T { + return this.container.get(serviceIdentifier); + } + + bind(serviceIdentifier: symbol): BindingToSyntax { + return this.container.bind(serviceIdentifier); + } +} +``` + +## Modern TypeScript Configuration + +### Strict TypeScript Setup +```json +// tsconfig.json +{ + "compilerOptions": { + "target": "ES2022", + "lib": ["ES2022"], + "module": "NodeNext", + "moduleResolution": "NodeNext", + "declaration": true, + "outDir": "./dist", + "strict": true, + "exactOptionalPropertyTypes": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedIndexedAccess": true, + "noImplicitOverride": true, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "baseUrl": ".", + "paths": { + "@/*": ["src/*"], + "@core/*": ["src/core/*"], + "@shared/*": ["src/core/shared/*"], + "@domains/*": ["src/core/domains/*"] + } + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "**/*.test.ts", "**/*.spec.ts"] +} +``` + +## Testing Implementation + +### Domain Unit Tests +```typescript +// src/core/domains/task-management/__tests__/entities/task.entity.test.ts +describe('Task Entity', () => { + let task: Task; + + beforeEach(() => { + task = Task.create('Test task', Priority.medium()); + }); + + describe('creation', () => { + it('should create task with pending status', () => { + expect(task.status.isPending()).toBe(true); + expect(task.description).toBe('Test task'); + expect(task.priority.equals(Priority.medium())).toBe(true); + }); + + 
it('should generate unique ID', () => { + const task1 = Task.create('Task 1', Priority.low()); + const task2 = Task.create('Task 2', Priority.low()); + + expect(task1.id.equals(task2.id)).toBe(false); + }); + }); + + describe('assignment', () => { + it('should assign to agent and change status', () => { + const agentId = 'agent-123'; + + task.assignTo(agentId); + + expect(task.assignedAgentId).toBe(agentId); + expect(task.status.isAssigned()).toBe(true); + }); + + it('should emit TaskAssignedEvent when assigned', () => { + const agentId = 'agent-123'; + + task.assignTo(agentId); + + const events = task.getUncommittedEvents(); + expect(events).toHaveLength(1); + expect(events[0]).toBeInstanceOf(TaskAssignedEvent); + }); + + it('should not allow assignment of completed task', () => { + task.assignTo('agent-123'); + task.complete(TaskResult.success('done')); + + expect(() => task.assignTo('agent-456')) + .toThrow('Cannot assign completed task'); + }); + }); +}); +``` + +### Integration Tests +```typescript +// src/core/domains/task-management/__tests__/integration/task-repository.integration.test.ts +describe('TaskRepository Integration', () => { + let repository: SqliteTaskRepository; + let db: Database; + + beforeEach(async () => { + db = new Database(':memory:'); + await setupTasksTable(db); + repository = new SqliteTaskRepository(db, new ConsoleLogger()); + }); + + afterEach(async () => { + await db.close(); + }); + + it('should save and retrieve task', async () => { + const task = Task.create('Test task', Priority.high()); + + await repository.save(task); + const retrieved = await repository.findById(task.id); + + expect(retrieved).toBeDefined(); + expect(retrieved!.id.equals(task.id)).toBe(true); + expect(retrieved!.description).toBe('Test task'); + expect(retrieved!.priority.equals(Priority.high())).toBe(true); + }); + + it('should find pending tasks ordered by priority', async () => { + const lowTask = Task.create('Low priority', Priority.low()); + const 
highTask = Task.create('High priority', Priority.high()); + + await repository.save(lowTask); + await repository.save(highTask); + + const pending = await repository.findPendingTasks(); + + expect(pending).toHaveLength(2); + expect(pending[0].id.equals(highTask.id)).toBe(true); // High priority first + expect(pending[1].id.equals(lowTask.id)).toBe(true); + }); +}); +``` + +## Performance Optimizations + +### Entity Caching +```typescript +// src/core/shared/infrastructure/entity-cache.ts +@Injectable() +export class EntityCache> { + private cache = new Map(); + private readonly ttl: number = 300000; // 5 minutes + + set(id: string, entity: T): void { + this.cache.set(id, { entity, timestamp: Date.now() }); + } + + get(id: string): T | null { + const cached = this.cache.get(id); + if (!cached) return null; + + // Check TTL + if (Date.now() - cached.timestamp > this.ttl) { + this.cache.delete(id); + return null; + } + + return cached.entity; + } + + invalidate(id: string): void { + this.cache.delete(id); + } + + clear(): void { + this.cache.clear(); + } +} +``` + +## Success Metrics + +- [ ] **Domain Isolation**: 100% clean dependency boundaries +- [ ] **Test Coverage**: >90% unit test coverage for domain logic +- [ ] **Type Safety**: Strict TypeScript compilation with zero any types +- [ ] **Performance**: <50ms average use case execution time +- [ ] **Memory Efficiency**: <100MB heap usage for core domains +- [ ] **Plugin Architecture**: Modular domain loading capability + +## Related V3 Skills + +- `v3-ddd-architecture` - DDD architectural design +- `v3-mcp-optimization` - MCP server integration +- `v3-memory-unification` - AgentDB repository integration +- `v3-swarm-coordination` - Swarm domain implementation + +## Usage Examples + +### Complete Core Implementation +```bash +# Full core module implementation +Task("Core implementation", + "Implement all core domains with DDD patterns and comprehensive testing", + "core-implementer") +``` + +### Domain-Specific 
Implementation +```bash +# Single domain implementation +Task("Task domain implementation", + "Implement task management domain with entities, services, and repositories", + "core-implementer") +``` \ No newline at end of file diff --git a/.agents/skills/v3-ddd-architecture/SKILL.md b/.agents/skills/v3-ddd-architecture/SKILL.md new file mode 100644 index 0000000..e068667 --- /dev/null +++ b/.agents/skills/v3-ddd-architecture/SKILL.md @@ -0,0 +1,442 @@ +--- +name: "V3 DDD Architecture" +description: "Domain-Driven Design architecture for Codex-flow v3. Implements modular, bounded context architecture with clean separation of concerns and microkernel pattern." +--- + +# V3 DDD Architecture + +## What This Skill Does + +Designs and implements Domain-Driven Design (DDD) architecture for Codex-flow v3, decomposing god objects into bounded contexts, implementing clean architecture patterns, and enabling modular, testable code structure. + +## Quick Start + +```bash +# Initialize DDD architecture analysis +Task("Architecture analysis", "Analyze current architecture and design DDD boundaries", "core-architect") + +# Domain modeling (parallel) +Task("Domain decomposition", "Break down orchestrator god object into domains", "core-architect") +Task("Context mapping", "Map bounded contexts and relationships", "core-architect") +Task("Interface design", "Design clean domain interfaces", "core-architect") +``` + +## DDD Implementation Strategy + +### Current Architecture Analysis +``` +├── PROBLEMATIC: core/orchestrator.ts (1,440 lines - GOD OBJECT) +│ ├── Task management responsibilities +│ ├── Session management responsibilities +│ ├── Health monitoring responsibilities +│ ├── Lifecycle management responsibilities +│ └── Event coordination responsibilities +│ +└── TARGET: Modular DDD Architecture + ├── core/domains/ + │ ├── task-management/ + │ ├── session-management/ + │ ├── health-monitoring/ + │ ├── lifecycle-management/ + │ └── event-coordination/ + └── core/shared/ + ├── 
interfaces/ + ├── value-objects/ + └── domain-events/ +``` + +### Domain Boundaries + +#### 1. Task Management Domain +```typescript +// core/domains/task-management/ +interface TaskManagementDomain { + // Entities + Task: TaskEntity; + TaskQueue: TaskQueueEntity; + + // Value Objects + TaskId: TaskIdVO; + TaskStatus: TaskStatusVO; + Priority: PriorityVO; + + // Services + TaskScheduler: TaskSchedulingService; + TaskValidator: TaskValidationService; + + // Repository + TaskRepository: ITaskRepository; +} +``` + +#### 2. Session Management Domain +```typescript +// core/domains/session-management/ +interface SessionManagementDomain { + // Entities + Session: SessionEntity; + SessionState: SessionStateEntity; + + // Value Objects + SessionId: SessionIdVO; + SessionStatus: SessionStatusVO; + + // Services + SessionLifecycle: SessionLifecycleService; + SessionPersistence: SessionPersistenceService; + + // Repository + SessionRepository: ISessionRepository; +} +``` + +#### 3. Health Monitoring Domain +```typescript +// core/domains/health-monitoring/ +interface HealthMonitoringDomain { + // Entities + HealthCheck: HealthCheckEntity; + Metric: MetricEntity; + + // Value Objects + HealthStatus: HealthStatusVO; + Threshold: ThresholdVO; + + // Services + HealthCollector: HealthCollectionService; + AlertManager: AlertManagementService; + + // Repository + MetricsRepository: IMetricsRepository; +} +``` + +## Microkernel Architecture Pattern + +### Core Kernel +```typescript +// core/kernel/Codex-flow-kernel.ts +export class ClaudeFlowKernel { + private domains: Map = new Map(); + private eventBus: DomainEventBus; + private dependencyContainer: Container; + + async initialize(): Promise { + // Load core domains + await this.loadDomain('task-management', new TaskManagementDomain()); + await this.loadDomain('session-management', new SessionManagementDomain()); + await this.loadDomain('health-monitoring', new HealthMonitoringDomain()); + + // Wire up domain events + 
this.setupDomainEventHandlers(); + } + + async loadDomain(name: string, domain: Domain): Promise { + await domain.initialize(this.dependencyContainer); + this.domains.set(name, domain); + } + + getDomain(name: string): T { + const domain = this.domains.get(name); + if (!domain) { + throw new DomainNotLoadedError(name); + } + return domain as T; + } +} +``` + +### Plugin Architecture +```typescript +// core/plugins/ +interface DomainPlugin { + name: string; + version: string; + dependencies: string[]; + + initialize(kernel: ClaudeFlowKernel): Promise; + shutdown(): Promise; +} + +// Example: Swarm Coordination Plugin +export class SwarmCoordinationPlugin implements DomainPlugin { + name = 'swarm-coordination'; + version = '3.0.0'; + dependencies = ['task-management', 'session-management']; + + async initialize(kernel: ClaudeFlowKernel): Promise { + const taskDomain = kernel.getDomain('task-management'); + const sessionDomain = kernel.getDomain('session-management'); + + // Register swarm coordination services + this.swarmCoordinator = new UnifiedSwarmCoordinator(taskDomain, sessionDomain); + kernel.registerService('swarm-coordinator', this.swarmCoordinator); + } +} +``` + +## Domain Events & Integration + +### Event-Driven Communication +```typescript +// core/shared/domain-events/ +abstract class DomainEvent { + public readonly eventId: string; + public readonly aggregateId: string; + public readonly occurredOn: Date; + public readonly eventVersion: number; + + constructor(aggregateId: string) { + this.eventId = crypto.randomUUID(); + this.aggregateId = aggregateId; + this.occurredOn = new Date(); + this.eventVersion = 1; + } +} + +// Task domain events +export class TaskAssignedEvent extends DomainEvent { + constructor( + taskId: string, + public readonly agentId: string, + public readonly priority: Priority + ) { + super(taskId); + } +} + +export class TaskCompletedEvent extends DomainEvent { + constructor( + taskId: string, + public readonly result: TaskResult, 
+ public readonly duration: number + ) { + super(taskId); + } +} + +// Event handlers +@EventHandler(TaskCompletedEvent) +export class TaskCompletedHandler { + constructor( + private metricsRepository: IMetricsRepository, + private sessionService: SessionLifecycleService + ) {} + + async handle(event: TaskCompletedEvent): Promise { + // Update metrics + await this.metricsRepository.recordTaskCompletion( + event.aggregateId, + event.duration + ); + + // Update session state + await this.sessionService.markTaskCompleted( + event.aggregateId, + event.result + ); + } +} +``` + +## Clean Architecture Layers + +```typescript +// Architecture layers +┌─────────────────────────────────────────┐ +│ Presentation │ ← CLI, API, UI +├─────────────────────────────────────────┤ +│ Application │ ← Use Cases, Commands +├─────────────────────────────────────────┤ +│ Domain │ ← Entities, Services, Events +├─────────────────────────────────────────┤ +│ Infrastructure │ ← DB, MCP, External APIs +└─────────────────────────────────────────┘ + +// Dependency direction: Outside → Inside +// Domain layer has NO external dependencies +``` + +### Application Layer (Use Cases) +```typescript +// core/application/use-cases/ +export class AssignTaskUseCase { + constructor( + private taskRepository: ITaskRepository, + private agentRepository: IAgentRepository, + private eventBus: DomainEventBus + ) {} + + async execute(command: AssignTaskCommand): Promise { + // 1. Validate command + await this.validateCommand(command); + + // 2. Load aggregates + const task = await this.taskRepository.findById(command.taskId); + const agent = await this.agentRepository.findById(command.agentId); + + // 3. Business logic (in domain) + task.assignTo(agent); + + // 4. Persist changes + await this.taskRepository.save(task); + + // 5. Publish domain events + task.getUncommittedEvents().forEach(event => + this.eventBus.publish(event) + ); + + // 6. 
Return result + return TaskResult.success(task); + } +} +``` + +## Module Configuration + +### Bounded Context Modules +```typescript +// core/domains/task-management/module.ts +export const taskManagementModule = { + name: 'task-management', + + entities: [ + TaskEntity, + TaskQueueEntity + ], + + valueObjects: [ + TaskIdVO, + TaskStatusVO, + PriorityVO + ], + + services: [ + TaskSchedulingService, + TaskValidationService + ], + + repositories: [ + { provide: ITaskRepository, useClass: SqliteTaskRepository } + ], + + eventHandlers: [ + TaskAssignedHandler, + TaskCompletedHandler + ] +}; +``` + +## Migration Strategy + +### Phase 1: Extract Domain Services +```typescript +// Extract services from orchestrator.ts +const extractionPlan = { + week1: [ + 'TaskManager → task-management domain', + 'SessionManager → session-management domain' + ], + week2: [ + 'HealthMonitor → health-monitoring domain', + 'LifecycleManager → lifecycle-management domain' + ], + week3: [ + 'EventCoordinator → event-coordination domain', + 'Wire up domain events' + ] +}; +``` + +### Phase 2: Implement Clean Interfaces +```typescript +// Clean separation with dependency injection +export class TaskController { + constructor( + @Inject('AssignTaskUseCase') private assignTask: AssignTaskUseCase, + @Inject('CompleteTaskUseCase') private completeTask: CompleteTaskUseCase + ) {} + + async assign(request: AssignTaskRequest): Promise { + const command = AssignTaskCommand.fromRequest(request); + const result = await this.assignTask.execute(command); + return TaskResponse.fromResult(result); + } +} +``` + +### Phase 3: Plugin System +```typescript +// Enable plugin-based extensions +const pluginSystem = { + core: ['task-management', 'session-management', 'health-monitoring'], + optional: ['swarm-coordination', 'learning-integration', 'performance-monitoring'] +}; +``` + +## Testing Strategy + +### Domain Testing (London School TDD) +```typescript +// Pure domain logic testing +describe('Task Entity', 
() => { + let task: TaskEntity; + let mockAgent: jest.Mocked; + + beforeEach(() => { + task = new TaskEntity(TaskId.create(), 'Test task'); + mockAgent = createMock(); + }); + + it('should assign to agent when valid', () => { + mockAgent.canAcceptTask.mockReturnValue(true); + + task.assignTo(mockAgent); + + expect(task.assignedAgent).toBe(mockAgent); + expect(task.status.value).toBe('assigned'); + }); + + it('should emit TaskAssignedEvent when assigned', () => { + mockAgent.canAcceptTask.mockReturnValue(true); + + task.assignTo(mockAgent); + + const events = task.getUncommittedEvents(); + expect(events).toHaveLength(1); + expect(events[0]).toBeInstanceOf(TaskAssignedEvent); + }); +}); +``` + +## Success Metrics + +- [ ] **God Object Elimination**: orchestrator.ts (1,440 lines) → 5 focused domains (<300 lines each) +- [ ] **Bounded Context Isolation**: 100% domain independence +- [ ] **Plugin Architecture**: Core + optional modules loading +- [ ] **Clean Architecture**: Dependency inversion maintained +- [ ] **Event-Driven Communication**: Loose coupling between domains +- [ ] **Test Coverage**: >90% domain logic coverage + +## Related V3 Skills + +- `v3-core-implementation` - Implementation of DDD domains +- `v3-memory-unification` - AgentDB integration within bounded contexts +- `v3-swarm-coordination` - Swarm coordination as domain plugin +- `v3-performance-optimization` - Performance optimization across domains + +## Usage Examples + +### Complete Domain Extraction +```bash +# Full DDD architecture implementation +Task("DDD architecture implementation", + "Extract orchestrator into DDD domains with clean architecture", + "core-architect") +``` + +### Plugin Development +```bash +# Create domain plugin +npm run create:plugin -- --name swarm-coordination --template domain +``` \ No newline at end of file diff --git a/.agents/skills/v3-integration-deep/SKILL.md b/.agents/skills/v3-integration-deep/SKILL.md new file mode 100644 index 0000000..ff62427 --- /dev/null 
+++ b/.agents/skills/v3-integration-deep/SKILL.md @@ -0,0 +1,241 @@ +--- +name: "V3 Deep Integration" +description: "Deep agentic-flow@alpha integration implementing ADR-001. Eliminates 10,000+ duplicate lines by building Codex-flow as a specialized extension rather than a parallel implementation." +--- + +# V3 Deep Integration + +## What This Skill Does + +Transforms Codex-flow from a parallel implementation into a specialized extension of agentic-flow@alpha, eliminating massive code duplication while achieving performance improvements and feature parity. + +## Quick Start + +```bash +# Initialize deep integration +Task("Integration architecture", "Design agentic-flow@alpha adapter layer", "v3-integration-architect") + +# Feature integration (parallel) +Task("SONA integration", "Integrate 5 SONA learning modes", "v3-integration-architect") +Task("Flash Attention", "Implement 2.49x-7.47x speedup", "v3-integration-architect") +Task("AgentDB coordination", "Setup 150x-12,500x search", "v3-integration-architect") +``` + +## Code Deduplication Strategy + +### Current Overlap → Integration +``` +┌─────────────────────────────────────────┐ +│ Codex-flow agentic-flow │ +├─────────────────────────────────────────┤ +│ SwarmCoordinator → Swarm System │ 80% overlap (eliminate) +│ AgentManager → Agent Lifecycle │ 70% overlap (eliminate) +│ TaskScheduler → Task Execution │ 60% overlap (eliminate) +│ SessionManager → Session Mgmt │ 50% overlap (eliminate) +└─────────────────────────────────────────┘ + +TARGET: <5,000 lines (vs 15,000+ currently) +``` + +## agentic-flow@alpha Feature Integration + +### SONA Learning Modes +```typescript +class SONAIntegration { + async initializeMode(mode: SONAMode): Promise { + switch(mode) { + case 'real-time': // ~0.05ms adaptation + case 'balanced': // general purpose + case 'research': // deep exploration + case 'edge': // resource-constrained + case 'batch': // high-throughput + } + await this.agenticFlow.sona.setMode(mode); + } +} +``` + +### Flash 
Attention Integration +```typescript +class FlashAttentionIntegration { + async optimizeAttention(): Promise { + return this.agenticFlow.attention.flashAttention({ + speedupTarget: '2.49x-7.47x', + memoryReduction: '50-75%', + mechanisms: ['multi-head', 'linear', 'local', 'global'] + }); + } +} +``` + +### AgentDB Coordination +```typescript +class AgentDBIntegration { + async setupCrossAgentMemory(): Promise { + await this.agentdb.enableCrossAgentSharing({ + indexType: 'HNSW', + speedupTarget: '150x-12500x', + dimensions: 1536 + }); + } +} +``` + +### MCP Tools Integration +```typescript +class MCPToolsIntegration { + async integrateBuiltinTools(): Promise { + // Leverage 213 pre-built tools + const tools = await this.agenticFlow.mcp.getAvailableTools(); + await this.registerClaudeFlowSpecificTools(tools); + + // Use 19 hook types + const hookTypes = await this.agenticFlow.hooks.getTypes(); + await this.configureClaudeFlowHooks(hookTypes); + } +} +``` + +## Migration Implementation + +### Phase 1: Adapter Layer +```typescript +import { Agent as AgenticFlowAgent } from 'agentic-flow@alpha'; + +export class ClaudeFlowAgent extends AgenticFlowAgent { + async handleClaudeFlowTask(task: ClaudeTask): Promise { + return this.executeWithSONA(task); + } + + // Backward compatibility + async legacyCompatibilityLayer(oldAPI: any): Promise { + return this.adaptToNewAPI(oldAPI); + } +} +``` + +### Phase 2: System Migration +```typescript +class SystemMigration { + async migrateSwarmCoordination(): Promise { + // Replace SwarmCoordinator (800+ lines) with agentic-flow Swarm + const swarmConfig = await this.extractSwarmConfig(); + await this.agenticFlow.swarm.initialize(swarmConfig); + } + + async migrateAgentManagement(): Promise { + // Replace AgentManager (1,736+ lines) with agentic-flow lifecycle + const agents = await this.extractActiveAgents(); + for (const agent of agents) { + await this.agenticFlow.agent.create(agent); + } + } + + async migrateTaskExecution(): Promise { 
+ // Replace TaskScheduler with agentic-flow task graph + const tasks = await this.extractTasks(); + await this.agenticFlow.task.executeGraph(this.buildTaskGraph(tasks)); + } +} +``` + +### Phase 3: Cleanup +```typescript +class CodeCleanup { + async removeDeprecatedCode(): Promise { + // Remove massive duplicate implementations + await this.removeFile('src/core/SwarmCoordinator.ts'); // 800+ lines + await this.removeFile('src/agents/AgentManager.ts'); // 1,736+ lines + await this.removeFile('src/task/TaskScheduler.ts'); // 500+ lines + + // Total reduction: 10,000+ → <5,000 lines + } +} +``` + +## RL Algorithm Integration + +```typescript +class RLIntegration { + algorithms = [ + 'PPO', 'DQN', 'A2C', 'MCTS', 'Q-Learning', + 'SARSA', 'Actor-Critic', 'Decision-Transformer' + ]; + + async optimizeAgentBehavior(): Promise { + for (const algorithm of this.algorithms) { + await this.agenticFlow.rl.train(algorithm, { + episodes: 1000, + rewardFunction: this.claudeFlowRewardFunction + }); + } + } +} +``` + +## Performance Integration + +### Flash Attention Targets +```typescript +const attentionBenchmark = { + baseline: 'current attention mechanism', + target: '2.49x-7.47x improvement', + memoryReduction: '50-75%', + implementation: 'agentic-flow@alpha Flash Attention' +}; +``` + +### AgentDB Search Performance +```typescript +const searchBenchmark = { + baseline: 'linear search in current systems', + target: '150x-12,500x via HNSW indexing', + implementation: 'agentic-flow@alpha AgentDB' +}; +``` + +## Backward Compatibility + +### Gradual Migration +```typescript +class BackwardCompatibility { + // Phase 1: Dual operation + async enableDualOperation(): Promise { + this.oldSystem.continue(); + this.newSystem.initialize(); + this.syncState(this.oldSystem, this.newSystem); + } + + // Phase 2: Feature-by-feature migration + async migrateGradually(): Promise { + const features = this.getAllFeatures(); + for (const feature of features) { + await this.migrateFeature(feature); 
+ await this.validateFeatureParity(feature); + } + } + + // Phase 3: Complete transition + async completeTransition(): Promise { + await this.validateFullParity(); + await this.deprecateOldSystem(); + } +} +``` + +## Success Metrics + +- **Code Reduction**: <5,000 lines orchestration (vs 15,000+) +- **Performance**: 2.49x-7.47x Flash Attention speedup +- **Search**: 150x-12,500x AgentDB improvement +- **Memory**: 50-75% usage reduction +- **Feature Parity**: 100% v2 functionality maintained +- **SONA**: <0.05ms adaptation time +- **Integration**: All 213 MCP tools + 19 hook types available + +## Related V3 Skills + +- `v3-memory-unification` - Memory system integration +- `v3-performance-optimization` - Performance target validation +- `v3-swarm-coordination` - Swarm system migration +- `v3-security-overhaul` - Secure integration patterns \ No newline at end of file diff --git a/.agents/skills/v3-mcp-optimization/SKILL.md b/.agents/skills/v3-mcp-optimization/SKILL.md new file mode 100644 index 0000000..eaa47de --- /dev/null +++ b/.agents/skills/v3-mcp-optimization/SKILL.md @@ -0,0 +1,777 @@ +--- +name: "V3 MCP Optimization" +description: "MCP server optimization and transport layer enhancement for Codex-flow v3. Implements connection pooling, load balancing, tool registry optimization, and performance monitoring for sub-100ms response times." +--- + +# V3 MCP Optimization + +## What This Skill Does + +Optimizes the Codex-flow v3 MCP (Model Context Protocol) server implementation with advanced transport layer enhancements, connection pooling, load balancing, and comprehensive performance monitoring to achieve sub-100ms response times. 
+ +## Quick Start + +```bash +# Initialize MCP optimization analysis +Task("MCP architecture", "Analyze current MCP server performance and bottlenecks", "mcp-specialist") + +# Optimization implementation (parallel) +Task("Connection pooling", "Implement MCP connection pooling and reuse", "mcp-specialist") +Task("Load balancing", "Add dynamic load balancing for MCP tools", "mcp-specialist") +Task("Transport optimization", "Optimize transport layer performance", "mcp-specialist") +``` + +## MCP Performance Architecture + +### Current State Analysis +``` +Current MCP Issues: +├── Cold Start Latency: ~1.8s MCP server init +├── Connection Overhead: New connection per request +├── Tool Registry: Linear search O(n) for 213+ tools +├── Transport Layer: No connection reuse +└── Memory Usage: No cleanup of idle connections + +Target Performance: +├── Startup Time: <400ms (4.5x improvement) +├── Tool Lookup: <5ms (O(1) hash table) +├── Connection Reuse: 90%+ connection pool hits +├── Response Time: <100ms p95 +└── Memory Efficiency: 50% reduction +``` + +### MCP Server Architecture +```typescript +// src/core/mcp/mcp-server.ts +import { Server } from '@modelcontextprotocol/sdk/server/index.js'; +import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; + +interface OptimizedMCPConfig { + // Connection pooling + maxConnections: number; + idleTimeoutMs: number; + connectionReuseEnabled: boolean; + + // Tool registry + toolCacheEnabled: boolean; + toolIndexType: 'hash' | 'trie'; + + // Performance + requestTimeoutMs: number; + batchingEnabled: boolean; + compressionEnabled: boolean; + + // Monitoring + metricsEnabled: boolean; + healthCheckIntervalMs: number; +} + +export class OptimizedMCPServer { + private server: Server; + private connectionPool: ConnectionPool; + private toolRegistry: FastToolRegistry; + private loadBalancer: MCPLoadBalancer; + private metrics: MCPMetrics; + + constructor(config: OptimizedMCPConfig) { + this.server = new Server({ + 
name: 'Codex-flow-v3', + version: '3.0.0' + }, { + capabilities: { + tools: { listChanged: true }, + resources: { subscribe: true, listChanged: true }, + prompts: { listChanged: true } + } + }); + + this.connectionPool = new ConnectionPool(config); + this.toolRegistry = new FastToolRegistry(config.toolIndexType); + this.loadBalancer = new MCPLoadBalancer(); + this.metrics = new MCPMetrics(config.metricsEnabled); + } + + async start(): Promise { + // Pre-warm connection pool + await this.connectionPool.preWarm(); + + // Pre-build tool index + await this.toolRegistry.buildIndex(); + + // Setup request handlers with optimizations + this.setupOptimizedHandlers(); + + // Start health monitoring + this.startHealthMonitoring(); + + // Start server + const transport = new StdioServerTransport(); + await this.server.connect(transport); + + this.metrics.recordStartup(); + } +} +``` + +## Connection Pool Implementation + +### Advanced Connection Pooling +```typescript +// src/core/mcp/connection-pool.ts +interface PooledConnection { + id: string; + connection: MCPConnection; + lastUsed: number; + usageCount: number; + isHealthy: boolean; +} + +export class ConnectionPool { + private pool: Map = new Map(); + private readonly config: ConnectionPoolConfig; + private healthChecker: HealthChecker; + + constructor(config: ConnectionPoolConfig) { + this.config = { + maxConnections: 50, + minConnections: 5, + idleTimeoutMs: 300000, // 5 minutes + maxUsageCount: 1000, + healthCheckIntervalMs: 30000, + ...config + }; + + this.healthChecker = new HealthChecker(this.config.healthCheckIntervalMs); + } + + async getConnection(endpoint: string): Promise { + const start = performance.now(); + + // Try to get from pool first + const pooled = this.findAvailableConnection(endpoint); + if (pooled) { + pooled.lastUsed = Date.now(); + pooled.usageCount++; + + this.recordMetric('pool_hit', performance.now() - start); + return pooled.connection; + } + + // Check pool capacity + if (this.pool.size >= 
this.config.maxConnections) { + await this.evictLeastUsedConnection(); + } + + // Create new connection + const connection = await this.createConnection(endpoint); + const pooledConn: PooledConnection = { + id: this.generateConnectionId(), + connection, + lastUsed: Date.now(), + usageCount: 1, + isHealthy: true + }; + + this.pool.set(pooledConn.id, pooledConn); + this.recordMetric('pool_miss', performance.now() - start); + + return connection; + } + + async releaseConnection(connection: MCPConnection): Promise { + // Mark connection as available for reuse + const pooled = this.findConnectionById(connection.id); + if (pooled) { + // Check if connection should be retired + if (pooled.usageCount >= this.config.maxUsageCount) { + await this.removeConnection(pooled.id); + } + } + } + + async preWarm(): Promise { + const connections: Promise[] = []; + + for (let i = 0; i < this.config.minConnections; i++) { + connections.push(this.createConnection('default')); + } + + await Promise.all(connections); + } + + private async evictLeastUsedConnection(): Promise { + let oldestConn: PooledConnection | null = null; + let oldestTime = Date.now(); + + for (const conn of this.pool.values()) { + if (conn.lastUsed < oldestTime) { + oldestTime = conn.lastUsed; + oldestConn = conn; + } + } + + if (oldestConn) { + await this.removeConnection(oldestConn.id); + } + } + + private findAvailableConnection(endpoint: string): PooledConnection | null { + for (const conn of this.pool.values()) { + if (conn.isHealthy && + conn.connection.endpoint === endpoint && + Date.now() - conn.lastUsed < this.config.idleTimeoutMs) { + return conn; + } + } + return null; + } +} +``` + +## Fast Tool Registry + +### O(1) Tool Lookup Implementation +```typescript +// src/core/mcp/fast-tool-registry.ts +interface ToolIndexEntry { + name: string; + handler: ToolHandler; + metadata: ToolMetadata; + usageCount: number; + avgLatencyMs: number; +} + +export class FastToolRegistry { + private toolIndex: Map = new 
Map(); + private categoryIndex: Map = new Map(); + private fuzzyMatcher: FuzzyMatcher; + private cache: LRUCache; + + constructor(indexType: 'hash' | 'trie' = 'hash') { + this.fuzzyMatcher = new FuzzyMatcher(); + this.cache = new LRUCache(1000); // Cache 1000 most used tools + } + + async buildIndex(): Promise { + const start = performance.now(); + + // Load all available tools + const tools = await this.loadAllTools(); + + // Build hash index for O(1) lookup + for (const tool of tools) { + const entry: ToolIndexEntry = { + name: tool.name, + handler: tool.handler, + metadata: tool.metadata, + usageCount: 0, + avgLatencyMs: 0 + }; + + this.toolIndex.set(tool.name, entry); + + // Build category index + const category = tool.metadata.category || 'general'; + if (!this.categoryIndex.has(category)) { + this.categoryIndex.set(category, []); + } + this.categoryIndex.get(category)!.push(tool.name); + } + + // Build fuzzy search index + await this.fuzzyMatcher.buildIndex(tools.map(t => t.name)); + + console.log(`Tool index built in ${(performance.now() - start).toFixed(2)}ms for ${tools.length} tools`); + } + + findTool(name: string): ToolIndexEntry | null { + // Try cache first + const cached = this.cache.get(name); + if (cached) return cached; + + // Try exact match + const exact = this.toolIndex.get(name); + if (exact) { + this.cache.set(name, exact); + return exact; + } + + // Try fuzzy match + const fuzzyMatches = this.fuzzyMatcher.search(name, 1); + if (fuzzyMatches.length > 0) { + const match = this.toolIndex.get(fuzzyMatches[0]); + if (match) { + this.cache.set(name, match); + return match; + } + } + + return null; + } + + findToolsByCategory(category: string): ToolIndexEntry[] { + const toolNames = this.categoryIndex.get(category) || []; + return toolNames + .map(name => this.toolIndex.get(name)) + .filter(entry => entry !== undefined) as ToolIndexEntry[]; + } + + getMostUsedTools(limit: number = 10): ToolIndexEntry[] { + return Array.from(this.toolIndex.values()) 
+ .sort((a, b) => b.usageCount - a.usageCount) + .slice(0, limit); + } + + recordToolUsage(toolName: string, latencyMs: number): void { + const entry = this.toolIndex.get(toolName); + if (entry) { + entry.usageCount++; + // Moving average for latency + entry.avgLatencyMs = (entry.avgLatencyMs + latencyMs) / 2; + } + } +} +``` + +## Load Balancing & Request Distribution + +### Intelligent Load Balancer +```typescript +// src/core/mcp/load-balancer.ts +interface ServerInstance { + id: string; + endpoint: string; + load: number; + responseTime: number; + isHealthy: boolean; + maxConnections: number; + currentConnections: number; +} + +export class MCPLoadBalancer { + private servers: Map = new Map(); + private routingStrategy: RoutingStrategy = 'least-connections'; + + addServer(server: ServerInstance): void { + this.servers.set(server.id, server); + } + + selectServer(toolCategory?: string): ServerInstance | null { + const healthyServers = Array.from(this.servers.values()) + .filter(server => server.isHealthy); + + if (healthyServers.length === 0) return null; + + switch (this.routingStrategy) { + case 'round-robin': + return this.roundRobinSelection(healthyServers); + + case 'least-connections': + return this.leastConnectionsSelection(healthyServers); + + case 'response-time': + return this.responseTimeSelection(healthyServers); + + case 'weighted': + return this.weightedSelection(healthyServers, toolCategory); + + default: + return healthyServers[0]; + } + } + + private leastConnectionsSelection(servers: ServerInstance[]): ServerInstance { + return servers.reduce((least, current) => + current.currentConnections < least.currentConnections ? current : least + ); + } + + private responseTimeSelection(servers: ServerInstance[]): ServerInstance { + return servers.reduce((fastest, current) => + current.responseTime < fastest.responseTime ? 
current : fastest + ); + } + + private weightedSelection(servers: ServerInstance[], category?: string): ServerInstance { + // Prefer servers with lower load and better response time + const scored = servers.map(server => ({ + server, + score: this.calculateServerScore(server, category) + })); + + scored.sort((a, b) => b.score - a.score); + return scored[0].server; + } + + private calculateServerScore(server: ServerInstance, category?: string): number { + const loadFactor = 1 - (server.currentConnections / server.maxConnections); + const responseFactor = 1 / (server.responseTime + 1); + const categoryBonus = this.getCategoryBonus(server, category); + + return loadFactor * 0.4 + responseFactor * 0.4 + categoryBonus * 0.2; + } + + updateServerMetrics(serverId: string, metrics: Partial): void { + const server = this.servers.get(serverId); + if (server) { + Object.assign(server, metrics); + } + } +} +``` + +## Transport Layer Optimization + +### High-Performance Transport +```typescript +// src/core/mcp/optimized-transport.ts +export class OptimizedTransport { + private compression: boolean = true; + private batching: boolean = true; + private batchBuffer: MCPMessage[] = []; + private batchTimeout: NodeJS.Timeout | null = null; + + constructor(private config: TransportConfig) {} + + async send(message: MCPMessage): Promise { + if (this.batching && this.canBatch(message)) { + this.addToBatch(message); + return; + } + + await this.sendImmediate(message); + } + + private async sendImmediate(message: MCPMessage): Promise { + const start = performance.now(); + + // Compress if enabled + const payload = this.compression + ? 
await this.compress(message) + : message; + + // Send through transport + await this.transport.send(payload); + + // Record metrics + this.recordLatency(performance.now() - start); + } + + private addToBatch(message: MCPMessage): void { + this.batchBuffer.push(message); + + // Start batch timeout if not already running + if (!this.batchTimeout) { + this.batchTimeout = setTimeout( + () => this.flushBatch(), + this.config.batchTimeoutMs || 10 + ); + } + + // Flush if batch is full + if (this.batchBuffer.length >= this.config.maxBatchSize) { + this.flushBatch(); + } + } + + private async flushBatch(): Promise { + if (this.batchBuffer.length === 0) return; + + const batch = this.batchBuffer.splice(0); + this.batchTimeout = null; + + // Send as single batched message + await this.sendImmediate({ + type: 'batch', + messages: batch + }); + } + + private canBatch(message: MCPMessage): boolean { + // Don't batch urgent messages or responses + return message.type !== 'response' && + message.priority !== 'high' && + message.type !== 'error'; + } + + private async compress(data: any): Promise { + // Use fast compression for smaller messages + return gzipSync(JSON.stringify(data)); + } +} +``` + +## Performance Monitoring + +### Real-time MCP Metrics +```typescript +// src/core/mcp/metrics.ts +interface MCPMetrics { + requestCount: number; + errorCount: number; + avgResponseTime: number; + p95ResponseTime: number; + connectionPoolHits: number; + connectionPoolMisses: number; + toolLookupTime: number; + startupTime: number; +} + +export class MCPMetricsCollector { + private metrics: MCPMetrics; + private responseTimeBuffer: number[] = []; + private readonly bufferSize = 1000; + + constructor() { + this.metrics = this.createInitialMetrics(); + } + + recordRequest(latencyMs: number): void { + this.metrics.requestCount++; + this.updateResponseTimes(latencyMs); + } + + recordError(): void { + this.metrics.errorCount++; + } + + recordConnectionPoolHit(): void { + 
this.metrics.connectionPoolHits++; + } + + recordConnectionPoolMiss(): void { + this.metrics.connectionPoolMisses++; + } + + recordToolLookup(latencyMs: number): void { + this.metrics.toolLookupTime = this.updateMovingAverage( + this.metrics.toolLookupTime, + latencyMs + ); + } + + recordStartup(latencyMs: number): void { + this.metrics.startupTime = latencyMs; + } + + getMetrics(): MCPMetrics { + return { ...this.metrics }; + } + + getHealthStatus(): HealthStatus { + const errorRate = this.metrics.errorCount / this.metrics.requestCount; + const poolHitRate = this.metrics.connectionPoolHits / + (this.metrics.connectionPoolHits + this.metrics.connectionPoolMisses); + + return { + status: this.determineHealthStatus(errorRate, poolHitRate), + errorRate, + poolHitRate, + avgResponseTime: this.metrics.avgResponseTime, + p95ResponseTime: this.metrics.p95ResponseTime + }; + } + + private updateResponseTimes(latency: number): void { + this.responseTimeBuffer.push(latency); + + if (this.responseTimeBuffer.length > this.bufferSize) { + this.responseTimeBuffer.shift(); + } + + this.metrics.avgResponseTime = this.calculateAverage(this.responseTimeBuffer); + this.metrics.p95ResponseTime = this.calculatePercentile(this.responseTimeBuffer, 95); + } + + private calculatePercentile(arr: number[], percentile: number): number { + const sorted = arr.slice().sort((a, b) => a - b); + const index = Math.ceil((percentile / 100) * sorted.length) - 1; + return sorted[index] || 0; + } + + private determineHealthStatus(errorRate: number, poolHitRate: number): 'healthy' | 'warning' | 'critical' { + if (errorRate > 0.1 || poolHitRate < 0.5) return 'critical'; + if (errorRate > 0.05 || poolHitRate < 0.7) return 'warning'; + return 'healthy'; + } +} +``` + +## Tool Registry Optimization + +### Pre-compiled Tool Index +```typescript +// src/core/mcp/tool-precompiler.ts +export class ToolPrecompiler { + async precompileTools(): Promise { + const tools = await this.loadAllTools(); + + // Create 
optimized lookup structures + const nameIndex = new Map(); + const categoryIndex = new Map(); + const fuzzyIndex = new Map(); + + for (const tool of tools) { + // Exact name index + nameIndex.set(tool.name, tool); + + // Category index + const category = tool.metadata.category || 'general'; + if (!categoryIndex.has(category)) { + categoryIndex.set(category, []); + } + categoryIndex.get(category)!.push(tool); + + // Pre-compute fuzzy variations + const variations = this.generateFuzzyVariations(tool.name); + for (const variation of variations) { + if (!fuzzyIndex.has(variation)) { + fuzzyIndex.set(variation, []); + } + fuzzyIndex.get(variation)!.push(tool.name); + } + } + + return { + nameIndex, + categoryIndex, + fuzzyIndex, + totalTools: tools.length, + compiledAt: new Date() + }; + } + + private generateFuzzyVariations(name: string): string[] { + const variations: string[] = []; + + // Common typos and abbreviations + variations.push(name.toLowerCase()); + variations.push(name.replace(/[-_]/g, '')); + variations.push(name.replace(/[aeiou]/gi, '')); // Consonants only + + // Add more fuzzy matching logic as needed + + return variations; + } +} +``` + +## Advanced Caching Strategy + +### Multi-Level Caching +```typescript +// src/core/mcp/multi-level-cache.ts +export class MultiLevelCache { + private l1Cache: Map = new Map(); // In-memory, fastest + private l2Cache: LRUCache; // LRU cache, larger capacity + private l3Cache: DiskCache; // Persistent disk cache + + constructor(config: CacheConfig) { + this.l2Cache = new LRUCache({ + max: config.l2MaxEntries || 10000, + ttl: config.l2TTL || 300000 // 5 minutes + }); + + this.l3Cache = new DiskCache(config.l3Path || './.cache/mcp'); + } + + async get(key: string): Promise { + // Try L1 cache first (fastest) + if (this.l1Cache.has(key)) { + return this.l1Cache.get(key); + } + + // Try L2 cache + const l2Value = this.l2Cache.get(key); + if (l2Value) { + // Promote to L1 + this.l1Cache.set(key, l2Value); + return l2Value; 
+ } + + // Try L3 cache (disk) + const l3Value = await this.l3Cache.get(key); + if (l3Value) { + // Promote to L2 and L1 + this.l2Cache.set(key, l3Value); + this.l1Cache.set(key, l3Value); + return l3Value; + } + + return null; + } + + async set(key: string, value: any, options?: CacheOptions): Promise { + // Set in all levels + this.l1Cache.set(key, value); + this.l2Cache.set(key, value); + + if (options?.persistent) { + await this.l3Cache.set(key, value); + } + + // Manage L1 cache size + if (this.l1Cache.size > 1000) { + const firstKey = this.l1Cache.keys().next().value; + this.l1Cache.delete(firstKey); + } + } +} +``` + +## Success Metrics + +### Performance Targets +- [ ] **Startup Time**: <400ms MCP server initialization (4.5x improvement) +- [ ] **Response Time**: <100ms p95 for tool execution +- [ ] **Tool Lookup**: <5ms average lookup time +- [ ] **Connection Pool**: >90% hit rate +- [ ] **Memory Usage**: 50% reduction in idle memory +- [ ] **Error Rate**: <1% failed requests +- [ ] **Throughput**: >1000 requests/second + +### Monitoring Dashboards +```typescript +const mcpDashboard = { + metrics: [ + 'Request latency (p50, p95, p99)', + 'Error rate by tool category', + 'Connection pool utilization', + 'Tool lookup performance', + 'Memory usage trends', + 'Cache hit rates (L1, L2, L3)' + ], + + alerts: [ + 'Response time >200ms for 5 minutes', + 'Error rate >5% for 1 minute', + 'Pool hit rate <70% for 10 minutes', + 'Memory usage >500MB for 5 minutes' + ] +}; +``` + +## Related V3 Skills + +- `v3-core-implementation` - Core domain integration with MCP +- `v3-performance-optimization` - Overall performance optimization +- `v3-swarm-coordination` - MCP integration with swarm coordination +- `v3-memory-unification` - Memory sharing via MCP tools + +## Usage Examples + +### Complete MCP Optimization +```bash +# Full MCP server optimization +Task("MCP optimization implementation", + "Implement all MCP performance optimizations with monitoring", + 
"mcp-specialist") +``` + +### Specific Optimization +```bash +# Connection pool optimization +Task("MCP connection pooling", + "Implement advanced connection pooling with health monitoring", + "mcp-specialist") +``` \ No newline at end of file diff --git a/.agents/skills/v3-memory-unification/SKILL.md b/.agents/skills/v3-memory-unification/SKILL.md new file mode 100644 index 0000000..279dc63 --- /dev/null +++ b/.agents/skills/v3-memory-unification/SKILL.md @@ -0,0 +1,174 @@ +--- +name: "V3 Memory Unification" +description: "Unify 6+ memory systems into AgentDB with HNSW indexing for 150x-12,500x search improvements. Implements ADR-006 (Unified Memory Service) and ADR-009 (Hybrid Memory Backend)." +--- + +# V3 Memory Unification + +## What This Skill Does + +Consolidates disparate memory systems into unified AgentDB backend with HNSW vector search, achieving 150x-12,500x search performance improvements while maintaining backward compatibility. + +## Quick Start + +```bash +# Initialize memory unification +Task("Memory architecture", "Design AgentDB unification strategy", "v3-memory-specialist") + +# AgentDB integration +Task("AgentDB setup", "Configure HNSW indexing and vector search", "v3-memory-specialist") + +# Data migration +Task("Memory migration", "Migrate SQLite/Markdown to AgentDB", "v3-memory-specialist") +``` + +## Systems to Unify + +### Legacy Systems → AgentDB +``` +┌─────────────────────────────────────────┐ +│ • MemoryManager (basic operations) │ +│ • DistributedMemorySystem (clustering) │ +│ • SwarmMemory (agent-specific) │ +│ • AdvancedMemoryManager (features) │ +│ • SQLiteBackend (structured) │ +│ • MarkdownBackend (file-based) │ +│ • HybridBackend (combination) │ +└─────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────┐ +│ 🚀 AgentDB with HNSW │ +│ • 150x-12,500x faster search │ +│ • Unified query interface │ +│ • Cross-agent memory sharing │ +│ • SONA learning integration │ 
+└─────────────────────────────────────────┘ +``` + +## Implementation Architecture + +### Unified Memory Service +```typescript +class UnifiedMemoryService implements IMemoryBackend { + constructor( + private agentdb: AgentDBAdapter, + private indexer: HNSWIndexer, + private migrator: DataMigrator + ) {} + + async store(entry: MemoryEntry): Promise { + await this.agentdb.store(entry); + await this.indexer.index(entry); + } + + async query(query: MemoryQuery): Promise { + if (query.semantic) { + return this.indexer.search(query); // 150x-12,500x faster + } + return this.agentdb.query(query); + } +} +``` + +### HNSW Vector Search +```typescript +class HNSWIndexer { + constructor(dimensions: number = 1536) { + this.index = new HNSWIndex({ + dimensions, + efConstruction: 200, + M: 16, + speedupTarget: '150x-12500x' + }); + } + + async search(query: MemoryQuery): Promise { + const embedding = await this.embedContent(query.content); + const results = this.index.search(embedding, query.limit || 10); + return this.retrieveEntries(results); + } +} +``` + +## Migration Strategy + +### Phase 1: Foundation +```typescript +// AgentDB adapter setup +const agentdb = new AgentDBAdapter({ + dimensions: 1536, + indexType: 'HNSW', + speedupTarget: '150x-12500x' +}); +``` + +### Phase 2: Data Migration +```typescript +// SQLite → AgentDB +const migrateFromSQLite = async () => { + const entries = await sqlite.getAll(); + for (const entry of entries) { + const embedding = await generateEmbedding(entry.content); + await agentdb.store({ ...entry, embedding }); + } +}; + +// Markdown → AgentDB +const migrateFromMarkdown = async () => { + const files = await glob('**/*.md'); + for (const file of files) { + const content = await fs.readFile(file, 'utf-8'); + await agentdb.store({ + id: generateId(), + content, + embedding: await generateEmbedding(content), + metadata: { originalFile: file } + }); + } +}; +``` + +## SONA Integration + +### Learning Pattern Storage +```typescript +class 
SONAMemoryIntegration { + async storePattern(pattern: LearningPattern): Promise { + await this.memory.store({ + id: pattern.id, + content: pattern.data, + metadata: { + sonaMode: pattern.mode, + reward: pattern.reward, + adaptationTime: pattern.adaptationTime + }, + embedding: await this.generateEmbedding(pattern.data) + }); + } + + async retrieveSimilarPatterns(query: string): Promise { + return this.memory.query({ + type: 'semantic', + content: query, + filters: { type: 'learning_pattern' } + }); + } +} +``` + +## Performance Targets + +- **Search Speed**: 150x-12,500x improvement via HNSW +- **Memory Usage**: 50-75% reduction through optimization +- **Query Latency**: <100ms for 1M+ entries +- **Cross-Agent Sharing**: Real-time memory synchronization +- **SONA Integration**: <0.05ms adaptation time + +## Success Metrics + +- [ ] All 7 legacy memory systems migrated to AgentDB +- [ ] 150x-12,500x search performance validated +- [ ] 50-75% memory usage reduction achieved +- [ ] Backward compatibility maintained +- [ ] SONA learning patterns integrated +- [ ] Cross-agent memory sharing operational \ No newline at end of file diff --git a/.agents/skills/v3-performance-optimization/SKILL.md b/.agents/skills/v3-performance-optimization/SKILL.md new file mode 100644 index 0000000..9041bec --- /dev/null +++ b/.agents/skills/v3-performance-optimization/SKILL.md @@ -0,0 +1,390 @@ +--- +name: "V3 Performance Optimization" +description: "Achieve aggressive v3 performance targets: 2.49x-7.47x Flash Attention speedup, 150x-12,500x search improvements, 50-75% memory reduction. Comprehensive benchmarking and optimization suite." +--- + +# V3 Performance Optimization + +## What This Skill Does + +Validates and optimizes Codex-flow v3 to achieve industry-leading performance through Flash Attention, AgentDB HNSW indexing, and comprehensive system optimization with continuous benchmarking. 
+ +## Quick Start + +```bash +# Initialize performance optimization +Task("Performance baseline", "Establish v2 performance benchmarks", "v3-performance-engineer") + +# Target validation (parallel) +Task("Flash Attention", "Validate 2.49x-7.47x speedup target", "v3-performance-engineer") +Task("Search optimization", "Validate 150x-12,500x search improvement", "v3-performance-engineer") +Task("Memory optimization", "Achieve 50-75% memory reduction", "v3-performance-engineer") +``` + +## Performance Target Matrix + +### Flash Attention Revolution +``` +┌─────────────────────────────────────────┐ +│ FLASH ATTENTION │ +├─────────────────────────────────────────┤ +│ Baseline: Standard attention │ +│ Target: 2.49x - 7.47x speedup │ +│ Memory: 50-75% reduction │ +│ Latency: Sub-millisecond processing │ +└─────────────────────────────────────────┘ +``` + +### Search Performance Revolution +``` +┌─────────────────────────────────────────┐ +│ SEARCH OPTIMIZATION │ +├─────────────────────────────────────────┤ +│ Current: O(n) linear search │ +│ Target: 150x - 12,500x improvement │ +│ Method: HNSW indexing │ +│ Latency: <100ms for 1M+ entries │ +└─────────────────────────────────────────┘ +``` + +## Comprehensive Benchmark Suite + +### Startup Performance +```typescript +class StartupBenchmarks { + async benchmarkColdStart(): Promise { + const startTime = performance.now(); + + await this.initializeCLI(); + await this.initializeMCPServer(); + await this.spawnTestAgent(); + + const totalTime = performance.now() - startTime; + + return { + total: totalTime, + target: 500, // ms + achieved: totalTime < 500 + }; + } +} +``` + +### Memory Operation Benchmarks +```typescript +class MemoryBenchmarks { + async benchmarkVectorSearch(): Promise { + const queries = this.generateTestQueries(10000); + + // Baseline: Current linear search + const baselineTime = await this.timeOperation(() => + this.currentMemory.searchAll(queries) + ); + + // Target: HNSW search + const hnswTime = await 
this.timeOperation(() => + this.agentDBMemory.hnswSearchAll(queries) + ); + + const improvement = baselineTime / hnswTime; + + return { + baseline: baselineTime, + hnsw: hnswTime, + improvement, + targetRange: [150, 12500], + achieved: improvement >= 150 + }; + } + + async benchmarkMemoryUsage(): Promise { + const baseline = process.memoryUsage().heapUsed; + + await this.loadTestDataset(); + const withData = process.memoryUsage().heapUsed; + + await this.enableOptimization(); + const optimized = process.memoryUsage().heapUsed; + + const reduction = (withData - optimized) / withData; + + return { + baseline, + withData, + optimized, + reductionPercent: reduction * 100, + targetReduction: [50, 75], + achieved: reduction >= 0.5 + }; + } +} +``` + +### Swarm Coordination Benchmarks +```typescript +class SwarmBenchmarks { + async benchmark15AgentCoordination(): Promise { + const agents = await this.spawn15Agents(); + + // Coordination latency + const coordinationTime = await this.timeOperation(() => + this.coordinateSwarmTask(agents) + ); + + // Task decomposition + const decompositionTime = await this.timeOperation(() => + this.decomposeComplexTask() + ); + + // Consensus achievement + const consensusTime = await this.timeOperation(() => + this.achieveSwarmConsensus(agents) + ); + + return { + coordination: coordinationTime, + decomposition: decompositionTime, + consensus: consensusTime, + agentCount: 15, + efficiency: this.calculateEfficiency(agents) + }; + } +} +``` + +### Flash Attention Benchmarks +```typescript +class AttentionBenchmarks { + async benchmarkFlashAttention(): Promise { + const sequences = this.generateSequences([512, 1024, 2048, 4096]); + const results = []; + + for (const sequence of sequences) { + // Baseline attention + const baselineResult = await this.benchmarkStandardAttention(sequence); + + // Flash attention + const flashResult = await this.benchmarkFlashAttention(sequence); + + results.push({ + sequenceLength: sequence.length, + speedup: 
baselineResult.time / flashResult.time, + memoryReduction: (baselineResult.memory - flashResult.memory) / baselineResult.memory, + targetSpeedup: [2.49, 7.47], + achieved: this.checkTarget(flashResult, [2.49, 7.47]) + }); + } + + return { + results, + averageSpeedup: this.calculateAverage(results, 'speedup'), + averageMemoryReduction: this.calculateAverage(results, 'memoryReduction') + }; + } +} +``` + +### SONA Learning Benchmarks +```typescript +class SONABenchmarks { + async benchmarkAdaptationTime(): Promise { + const scenarios = [ + 'pattern_recognition', + 'task_optimization', + 'error_correction', + 'performance_tuning' + ]; + + const results = []; + + for (const scenario of scenarios) { + const startTime = performance.hrtime.bigint(); + await this.sona.adapt(scenario); + const endTime = performance.hrtime.bigint(); + + const adaptationTimeMs = Number(endTime - startTime) / 1000000; + + results.push({ + scenario, + adaptationTime: adaptationTimeMs, + target: 0.05, // ms + achieved: adaptationTimeMs <= 0.05 + }); + } + + return { + scenarios: results, + averageTime: results.reduce((sum, r) => sum + r.adaptationTime, 0) / results.length, + successRate: results.filter(r => r.achieved).length / results.length + }; + } +} +``` + +## Performance Monitoring Dashboard + +### Real-time Metrics +```typescript +class PerformanceMonitor { + async collectMetrics(): Promise { + return { + timestamp: Date.now(), + flashAttention: await this.measureFlashAttention(), + searchPerformance: await this.measureSearchSpeed(), + memoryUsage: await this.measureMemoryEfficiency(), + startupTime: await this.measureStartupLatency(), + sonaAdaptation: await this.measureSONASpeed(), + swarmCoordination: await this.measureSwarmEfficiency() + }; + } + + async generateReport(): Promise { + const snapshot = await this.collectMetrics(); + + return { + summary: this.generateSummary(snapshot), + achievements: this.checkTargetAchievements(snapshot), + trends: this.analyzeTrends(), + 
recommendations: this.generateOptimizations(), + regressions: await this.detectRegressions() + }; + } +} +``` + +### Continuous Regression Detection +```typescript +class PerformanceRegression { + async detectRegressions(): Promise { + const current = await this.runFullBenchmark(); + const baseline = await this.getBaseline(); + + const regressions = []; + + for (const [metric, currentValue] of Object.entries(current)) { + const baselineValue = baseline[metric]; + const change = (currentValue - baselineValue) / baselineValue; + + if (change < -0.05) { // 5% regression threshold + regressions.push({ + metric, + baseline: baselineValue, + current: currentValue, + regressionPercent: change * 100, + severity: this.classifyRegression(change) + }); + } + } + + return { + hasRegressions: regressions.length > 0, + regressions, + recommendations: this.generateRegressionFixes(regressions) + }; + } +} +``` + +## Optimization Strategies + +### Memory Optimization +```typescript +class MemoryOptimization { + async optimizeMemoryUsage(): Promise { + // Implement memory pooling + await this.setupMemoryPools(); + + // Enable garbage collection tuning + await this.optimizeGarbageCollection(); + + // Implement object reuse patterns + await this.setupObjectPools(); + + // Enable memory compression + await this.enableMemoryCompression(); + + return this.validateMemoryReduction(); + } +} +``` + +### CPU Optimization +```typescript +class CPUOptimization { + async optimizeCPUUsage(): Promise { + // Implement worker thread pools + await this.setupWorkerThreads(); + + // Enable CPU-specific optimizations + await this.enableSIMDInstructions(); + + // Implement task batching + await this.optimizeTaskBatching(); + + return this.validateCPUImprovement(); + } +} +``` + +## Target Validation Framework + +### Performance Gates +```typescript +class PerformanceGates { + async validateAllTargets(): Promise { + const results = await Promise.all([ + this.validateFlashAttention(), // 2.49x-7.47x + 
this.validateSearchPerformance(), // 150x-12,500x + this.validateMemoryReduction(), // 50-75% + this.validateStartupTime(), // <500ms + this.validateSONAAdaptation() // <0.05ms + ]); + + return { + allTargetsAchieved: results.every(r => r.achieved), + results, + overallScore: this.calculateOverallScore(results), + recommendations: this.generateRecommendations(results) + }; + } +} +``` + +## Success Metrics + +### Primary Targets +- [ ] **Flash Attention**: 2.49x-7.47x speedup validated +- [ ] **Search Performance**: 150x-12,500x improvement confirmed +- [ ] **Memory Reduction**: 50-75% usage optimization achieved +- [ ] **Startup Time**: <500ms cold start consistently +- [ ] **SONA Adaptation**: <0.05ms learning response time +- [ ] **15-Agent Coordination**: Efficient parallel execution + +### Continuous Monitoring +- [ ] **Performance Dashboard**: Real-time metrics collection +- [ ] **Regression Testing**: Automated performance validation +- [ ] **Trend Analysis**: Performance evolution tracking +- [ ] **Alert System**: Immediate regression notification + +## Related V3 Skills + +- `v3-integration-deep` - Performance integration with agentic-flow +- `v3-memory-unification` - Memory performance optimization +- `v3-swarm-coordination` - Swarm performance coordination +- `v3-security-overhaul` - Secure performance patterns + +## Usage Examples + +### Complete Performance Validation +```bash +# Full performance suite +npm run benchmark:v3 + +# Specific target validation +npm run benchmark:flash-attention +npm run benchmark:agentdb-search +npm run benchmark:memory-optimization + +# Continuous monitoring +npm run monitor:performance +``` \ No newline at end of file diff --git a/.agents/skills/v3-security-overhaul/SKILL.md b/.agents/skills/v3-security-overhaul/SKILL.md new file mode 100644 index 0000000..3fd2cc5 --- /dev/null +++ b/.agents/skills/v3-security-overhaul/SKILL.md @@ -0,0 +1,82 @@ +--- +name: "V3 Security Overhaul" +description: "Complete security 
architecture overhaul for Codex-flow v3. Addresses critical CVEs (CVE-1, CVE-2, CVE-3) and implements secure-by-default patterns. Use for security-first v3 implementation." +--- + +# V3 Security Overhaul + +## What This Skill Does + +Orchestrates comprehensive security overhaul for Codex-flow v3, addressing critical vulnerabilities and establishing security-first development practices using specialized v3 security agents. + +## Quick Start + +```bash +# Initialize V3 security domain (parallel) +Task("Security architecture", "Design v3 threat model and security boundaries", "v3-security-architect") +Task("CVE remediation", "Fix CVE-1, CVE-2, CVE-3 critical vulnerabilities", "security-auditor") +Task("Security testing", "Implement TDD London School security framework", "test-architect") +``` + +## Critical Security Fixes + +### CVE-1: Vulnerable Dependencies +```bash +npm update @anthropic-ai/Codex@^2.0.31 +npm audit --audit-level high +``` + +### CVE-2: Weak Password Hashing +```typescript +// ❌ Old: SHA-256 with hardcoded salt +const hash = crypto.createHash('sha256').update(password + salt).digest('hex'); + +// ✅ New: bcrypt with 12 rounds +import bcrypt from 'bcrypt'; +const hash = await bcrypt.hash(password, 12); +``` + +### CVE-3: Hardcoded Credentials +```typescript +// ✅ Generate secure random credentials +const apiKey = crypto.randomBytes(32).toString('hex'); +``` + +## Security Patterns + +### Input Validation (Zod) +```typescript +import { z } from 'zod'; + +const TaskSchema = z.object({ + taskId: z.string().uuid(), + content: z.string().max(10000), + agentType: z.enum(['security', 'core', 'integration']) +}); +``` + +### Path Sanitization +```typescript +function securePath(userPath: string, allowedPrefix: string): string { + const resolved = path.resolve(allowedPrefix, userPath); + if (!resolved.startsWith(path.resolve(allowedPrefix))) { + throw new SecurityError('Path traversal detected'); + } + return resolved; +} +``` + +### Safe Command Execution 
+```typescript
+import { execFile } from 'child_process';
+import { promisify } from 'util';
+
+const execFileAsync = promisify(execFile);
+
+// ✅ Safe: arguments passed directly, no shell interpretation
+const { stdout } = await execFileAsync('git', [userInput]);
+```
+
+## Success Metrics
+
+- **Security Score**: 90/100 (npm audit + custom scans)
+- **CVE Resolution**: 100% of critical vulnerabilities fixed
+- **Test Coverage**: >95% security-critical code
+- **Implementation**: All secure patterns documented and tested \ No newline at end of file diff --git a/.agents/skills/v3-swarm-coordination/SKILL.md b/.agents/skills/v3-swarm-coordination/SKILL.md new file mode 100644 index 0000000..f3db009 --- /dev/null +++ b/.agents/skills/v3-swarm-coordination/SKILL.md @@ -0,0 +1,340 @@ +--- +name: "V3 Swarm Coordination" +description: "15-agent hierarchical mesh coordination for v3 implementation. Orchestrates parallel execution across security, core, and integration domains following 10 ADRs with 14-week timeline." +--- +
+# V3 Swarm Coordination
+
+## What This Skill Does
+
+Orchestrates the complete 15-agent hierarchical mesh swarm for Codex-flow v3 implementation, coordinating parallel execution across domains while maintaining dependencies and timeline adherence. 
+ +## Quick Start + +```bash +# Initialize 15-agent v3 swarm +Task("Swarm initialization", "Initialize hierarchical mesh for v3 implementation", "v3-queen-coordinator") + +# Security domain (Phase 1 - Critical priority) +Task("Security architecture", "Design v3 threat model and security boundaries", "v3-security-architect") +Task("CVE remediation", "Fix CVE-1, CVE-2, CVE-3 vulnerabilities", "security-auditor") +Task("Security testing", "Implement TDD security framework", "test-architect") + +# Core domain (Phase 2 - Parallel execution) +Task("Memory unification", "Implement AgentDB 150x improvement", "v3-memory-specialist") +Task("Integration architecture", "Deep agentic-flow@alpha integration", "v3-integration-architect") +Task("Performance validation", "Validate 2.49x-7.47x targets", "v3-performance-engineer") +``` + +## 15-Agent Swarm Architecture + +### Hierarchical Mesh Topology +``` + 👑 QUEEN COORDINATOR + (Agent #1) + │ + ┌────────────────────┼────────────────────┐ + │ │ │ + 🛡️ SECURITY 🧠 CORE 🔗 INTEGRATION + (Agents #2-4) (Agents #5-9) (Agents #10-12) + │ │ │ + └────────────────────┼────────────────────┘ + │ + ┌────────────────────┼────────────────────┐ + │ │ │ + 🧪 QUALITY ⚡ PERFORMANCE 🚀 DEPLOYMENT + (Agent #13) (Agent #14) (Agent #15) +``` + +### Agent Roster +| ID | Agent | Domain | Phase | Responsibility | +|----|-------|--------|-------|----------------| +| 1 | Queen Coordinator | Orchestration | All | GitHub issues, dependencies, timeline | +| 2 | Security Architect | Security | Foundation | Threat modeling, CVE planning | +| 3 | Security Implementer | Security | Foundation | CVE fixes, secure patterns | +| 4 | Security Tester | Security | Foundation | TDD security testing | +| 5 | Core Architect | Core | Systems | DDD architecture, coordination | +| 6 | Core Implementer | Core | Systems | Core module implementation | +| 7 | Memory Specialist | Core | Systems | AgentDB unification | +| 8 | Swarm Specialist | Core | Systems | Unified coordination 
engine | +| 9 | MCP Specialist | Core | Systems | MCP server optimization | +| 10 | Integration Architect | Integration | Integration | agentic-flow@alpha deep integration | +| 11 | CLI/Hooks Developer | Integration | Integration | CLI modernization | +| 12 | Neural/Learning Dev | Integration | Integration | SONA integration | +| 13 | TDD Test Engineer | Quality | All | London School TDD | +| 14 | Performance Engineer | Performance | Optimization | Benchmarking validation | +| 15 | Release Engineer | Deployment | Release | CI/CD and v3.0.0 release | + +## Implementation Phases + +### Phase 1: Foundation (Week 1-2) +**Active Agents**: #1, #2-4, #5-6 +```typescript +const phase1 = async () => { + // Parallel security and architecture foundation + await Promise.all([ + // Security domain (critical priority) + Task("Security architecture", "Complete threat model and security boundaries", "v3-security-architect"), + Task("CVE-1 fix", "Update vulnerable dependencies", "security-implementer"), + Task("CVE-2 fix", "Replace weak password hashing", "security-implementer"), + Task("CVE-3 fix", "Remove hardcoded credentials", "security-implementer"), + Task("Security testing", "TDD London School security framework", "test-architect"), + + // Core architecture foundation + Task("DDD architecture", "Design domain boundaries and structure", "core-architect"), + Task("Type modernization", "Update type system for v3", "core-implementer") + ]); +}; +``` + +### Phase 2: Core Systems (Week 3-6) +**Active Agents**: #1, #5-9, #13 +```typescript +const phase2 = async () => { + // Parallel core system implementation + await Promise.all([ + Task("Memory unification", "Implement AgentDB with 150x-12,500x improvement", "v3-memory-specialist"), + Task("Swarm coordination", "Merge 4 coordination systems into unified engine", "swarm-specialist"), + Task("MCP optimization", "Optimize MCP server performance", "mcp-specialist"), + Task("Core implementation", "Implement DDD modular architecture", 
"core-implementer"),
+      Task("TDD core tests", "Comprehensive test coverage for core systems", "test-architect")
+    ]);
+};
+```
+
+### Phase 3: Integration (Week 7-10)
+**Active Agents**: #1, #10-12, #13-14
+```typescript
+const phase3 = async () => {
+  // Parallel integration and optimization
+  await Promise.all([
+    Task("agentic-flow integration", "Eliminate 10,000+ duplicate lines", "v3-integration-architect"),
+    Task("CLI modernization", "Enhance CLI with hooks system", "cli-hooks-developer"),
+    Task("SONA integration", "Implement <0.05ms learning adaptation", "neural-learning-developer"),
+    Task("Performance benchmarking", "Validate 2.49x-7.47x targets", "v3-performance-engineer"),
+    Task("Integration testing", "End-to-end system validation", "test-architect")
+  ]);
+};
+```
+
+### Phase 4: Release (Week 11-14)
+**Active Agents**: All 15
+```typescript
+const phase4 = async () => {
+  // Full swarm final optimization
+  await Promise.all([
+    Task("Performance optimization", "Final optimization pass", "v3-performance-engineer"),
+    Task("Release preparation", "CI/CD pipeline and v3.0.0 release", "release-engineer"),
+    Task("Final testing", "Complete test coverage validation", "test-architect"),
+
+    // All agents: Final polish and optimization
+    ...agents.map(agent =>
+      Task("Final polish", `Agent ${agent.id} final optimization`, agent.name)
+    )
+  ]);
+};
+```
+
+## Coordination Patterns
+
+### Dependency Management
+```typescript
+class DependencyCoordination {
+  private dependencies = new Map<number, number[]>([
+    // Security first (no dependencies)
+    [2, []], [3, [2]], [4, [2, 3]],
+
+    // Core depends on security foundation
+    [5, [2]], [6, [5]], [7, [5]], [8, [5, 7]], [9, [5]],
+
+    // Integration depends on core systems
+    [10, [5, 7, 8]], [11, [5, 10]], [12, [7, 10]],
+
+    // Quality and performance cross-cutting
+    [13, [2, 5]], [14, [5, 7, 8, 10]], [15, [13, 14]]
+  ]);
+
+  async coordinateExecution(): Promise<void> {
+    const completed = new Set<number>();
+
+    while (completed.size < 15) {
+      
const ready = this.getReadyAgents(completed);
+
+      if (ready.length === 0) {
+        throw new Error('Deadlock detected in dependency chain');
+      }
+
+      // Execute ready agents in parallel
+      await Promise.all(ready.map(agentId => this.executeAgent(agentId)));
+
+      ready.forEach(id => completed.add(id));
+    }
+  }
+}
+```
+
+### GitHub Integration
+```typescript
+class GitHubCoordination {
+  async initializeV3Milestone(): Promise<void> {
+    await gh.createMilestone({
+      title: 'Codex-Flow v3.0.0 Implementation',
+      description: '15-agent swarm implementation of 10 ADRs',
+      dueDate: this.calculate14WeekDeadline()
+    });
+  }
+
+  async createEpicIssues(): Promise<void> {
+    const epics = [
+      { title: 'Security Overhaul (CVE-1,2,3)', agents: [2, 3, 4] },
+      { title: 'Memory Unification (AgentDB)', agents: [7] },
+      { title: 'agentic-flow Integration', agents: [10] },
+      { title: 'Performance Optimization', agents: [14] },
+      { title: 'DDD Architecture', agents: [5, 6] }
+    ];
+
+    for (const epic of epics) {
+      await gh.createIssue({
+        title: epic.title,
+        labels: ['epic', 'v3', ...epic.agents.map(id => `agent-${id}`)],
+        assignees: epic.agents.map(id => this.getAgentGithubUser(id))
+      });
+    }
+  }
+
+  async trackProgress(): Promise<void> {
+    // Hourly progress updates from each agent
+    setInterval(async () => {
+      for (const agent of this.agents) {
+        await this.postAgentProgress(agent);
+      }
+    }, 3600000); // 1 hour
+  }
+}
+```
+
+### Communication Bus
+```typescript
+class SwarmCommunication {
+  private bus = new QuicSwarmBus({
+    maxAgents: 15,
+    messageTimeout: 30000,
+    retryAttempts: 3
+  });
+
+  async broadcastToSecurityDomain(message: SwarmMessage): Promise<void> {
+    await this.bus.broadcast(message, {
+      targetAgents: [2, 3, 4],
+      priority: 'critical'
+    });
+  }
+
+  async coordinateCoreSystems(message: SwarmMessage): Promise<void> {
+    await this.bus.broadcast(message, {
+      targetAgents: [5, 6, 7, 8, 9],
+      priority: 'high'
+    });
+  }
+
+  async notifyIntegrationTeam(message: SwarmMessage): Promise<void> {
+    await 
this.bus.broadcast(message, {
+      targetAgents: [10, 11, 12],
+      priority: 'medium'
+    });
+  }
+}
+```
+
+## Performance Coordination
+
+### Parallel Efficiency Monitoring
+```typescript
+class EfficiencyMonitor {
+  async measureParallelEfficiency(): Promise<EfficiencyReport> {
+    const agentUtilization = await this.measureAgentUtilization();
+    const coordinationOverhead = await this.measureCoordinationCost();
+
+    return {
+      totalEfficiency: agentUtilization.average,
+      target: 0.85, // >85% utilization
+      achieved: agentUtilization.average > 0.85,
+      bottlenecks: this.identifyBottlenecks(agentUtilization),
+      recommendations: this.generateOptimizations()
+    };
+  }
+}
+```
+
+### Load Balancing
+```typescript
+class SwarmLoadBalancer {
+  async balanceWorkload(): Promise<void> {
+    const workloads = await this.analyzeAgentWorkloads();
+
+    for (const [agentId, load] of workloads.entries()) {
+      if (load > this.getCapacityThreshold(agentId)) {
+        await this.redistributeWork(agentId);
+      }
+    }
+  }
+
+  async redistributeWork(overloadedAgent: number): Promise<void> {
+    const availableAgents = this.getAvailableAgents();
+    const tasks = await this.getAgentTasks(overloadedAgent);
+
+    // Redistribute tasks to available agents
+    for (const task of tasks) {
+      const bestAgent = this.selectOptimalAgent(task, availableAgents);
+      await this.reassignTask(task, bestAgent);
+    }
+  }
+}
+```
+
+## Success Metrics
+
+### Swarm Coordination
+- [ ] **Parallel Efficiency**: >85% agent utilization time
+- [ ] **Dependency Resolution**: Zero deadlocks or blocking issues
+- [ ] **Communication Latency**: <100ms inter-agent messaging
+- [ ] **Timeline Adherence**: 14-week delivery maintained
+- [ ] **GitHub Integration**: <4h automated issue response
+
+### Implementation Targets
+- [ ] **ADR Coverage**: All 10 ADRs implemented successfully
+- [ ] **Performance**: 2.49x-7.47x Flash Attention achieved
+- [ ] **Search**: 150x-12,500x AgentDB improvement validated
+- [ ] **Code Reduction**: <5,000 lines (vs 15,000+)
+- [ ] **Security**: 
90/100 security score achieved + +## Related V3 Skills + +- `v3-security-overhaul` - Security domain coordination +- `v3-memory-unification` - Memory system coordination +- `v3-integration-deep` - Integration domain coordination +- `v3-performance-optimization` - Performance domain coordination + +## Usage Examples + +### Initialize Complete V3 Swarm +```bash +# Queen Coordinator initializes full swarm +Task("V3 swarm initialization", + "Initialize 15-agent hierarchical mesh for complete v3 implementation", + "v3-queen-coordinator") +``` + +### Phase-based Execution +```bash +# Phase 1: Security-first foundation +npm run v3:phase1:security + +# Phase 2: Core systems parallel +npm run v3:phase2:core-systems + +# Phase 3: Integration and optimization +npm run v3:phase3:integration + +# Phase 4: Release preparation +npm run v3:phase4:release +``` \ No newline at end of file diff --git a/.agents/skills/verification-quality/SKILL.md b/.agents/skills/verification-quality/SKILL.md new file mode 100644 index 0000000..bc9d5af --- /dev/null +++ b/.agents/skills/verification-quality/SKILL.md @@ -0,0 +1,649 @@ +--- +name: "Verification & Quality Assurance" +description: "Comprehensive truth scoring, code quality verification, and automatic rollback system with 0.95 accuracy threshold for ensuring high-quality agent outputs and codebase reliability." 
+version: "2.0.0" +category: "quality-assurance" +tags: ["verification", "truth-scoring", "quality", "rollback", "metrics", "ci-cd"] +--- + +# Verification & Quality Assurance Skill + +## What This Skill Does + +This skill provides a comprehensive verification and quality assurance system that ensures code quality and correctness through: + +- **Truth Scoring**: Real-time reliability metrics (0.0-1.0 scale) for code, agents, and tasks +- **Verification Checks**: Automated code correctness, security, and best practices validation +- **Automatic Rollback**: Instant reversion of changes that fail verification (default threshold: 0.95) +- **Quality Metrics**: Statistical analysis with trends, confidence intervals, and improvement tracking +- **CI/CD Integration**: Export capabilities for continuous integration pipelines +- **Real-time Monitoring**: Live dashboards and watch modes for ongoing verification + +## Prerequisites + +- Codex Flow installed (`npx Codex-flow@alpha`) +- Git repository (for rollback features) +- Node.js 18+ (for dashboard features) + +## Quick Start + +```bash +# View current truth scores +npx Codex-flow@alpha truth + +# Run verification check +npx Codex-flow@alpha verify check + +# Verify specific file with custom threshold +npx Codex-flow@alpha verify check --file src/app.js --threshold 0.98 + +# Rollback last failed verification +npx Codex-flow@alpha verify rollback --last-good +``` + +--- + +## Complete Guide + +### Truth Scoring System + +#### View Truth Metrics + +Display comprehensive quality and reliability metrics for your codebase and agent tasks. 
+ +**Basic Usage:** +```bash +# View current truth scores (default: table format) +npx Codex-flow@alpha truth + +# View scores for specific time period +npx Codex-flow@alpha truth --period 7d + +# View scores for specific agent +npx Codex-flow@alpha truth --agent coder --period 24h + +# Find files/tasks below threshold +npx Codex-flow@alpha truth --threshold 0.8 +``` + +**Output Formats:** +```bash +# Table format (default) +npx Codex-flow@alpha truth --format table + +# JSON for programmatic access +npx Codex-flow@alpha truth --format json + +# CSV for spreadsheet analysis +npx Codex-flow@alpha truth --format csv + +# HTML report with visualizations +npx Codex-flow@alpha truth --format html --export report.html +``` + +**Real-time Monitoring:** +```bash +# Watch mode with live updates +npx Codex-flow@alpha truth --watch + +# Export metrics automatically +npx Codex-flow@alpha truth --export .Codex-flow/metrics/truth-$(date +%Y%m%d).json +``` + +#### Truth Score Dashboard + +Example dashboard output: +``` +📊 Truth Metrics Dashboard +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Overall Truth Score: 0.947 ✅ +Trend: ↗️ +2.3% (7d) + +Top Performers: + verification-agent 0.982 ⭐ + code-analyzer 0.971 ⭐ + test-generator 0.958 ✅ + +Needs Attention: + refactor-agent 0.821 ⚠️ + docs-generator 0.794 ⚠️ + +Recent Tasks: + task-456 0.991 ✅ "Implement auth" + task-455 0.967 ✅ "Add tests" + task-454 0.743 ❌ "Refactor API" +``` + +#### Metrics Explained + +**Truth Scores (0.0-1.0):** +- `1.0-0.95`: Excellent ⭐ (production-ready) +- `0.94-0.85`: Good ✅ (acceptable quality) +- `0.84-0.75`: Warning ⚠️ (needs attention) +- `<0.75`: Critical ❌ (requires immediate action) + +**Trend Indicators:** +- ↗️ Improving (positive trend) +- → Stable (consistent performance) +- ↘️ Declining (quality regression detected) + +**Statistics:** +- **Mean Score**: Average truth score across all measurements +- **Median Score**: Middle value (less affected by outliers) +- **Standard Deviation**: 
Consistency of scores (lower = more consistent) +- **Confidence Interval**: Statistical reliability of measurements + +### Verification Checks + +#### Run Verification + +Execute comprehensive verification checks on code, tasks, or agent outputs. + +**File Verification:** +```bash +# Verify single file +npx Codex-flow@alpha verify check --file src/app.js + +# Verify directory recursively +npx Codex-flow@alpha verify check --directory src/ + +# Verify with auto-fix enabled +npx Codex-flow@alpha verify check --file src/utils.js --auto-fix + +# Verify current working directory +npx Codex-flow@alpha verify check +``` + +**Task Verification:** +```bash +# Verify specific task output +npx Codex-flow@alpha verify check --task task-123 + +# Verify with custom threshold +npx Codex-flow@alpha verify check --task task-456 --threshold 0.99 + +# Verbose output for debugging +npx Codex-flow@alpha verify check --task task-789 --verbose +``` + +**Batch Verification:** +```bash +# Verify multiple files in parallel +npx Codex-flow@alpha verify batch --files "*.js" --parallel + +# Verify with pattern matching +npx Codex-flow@alpha verify batch --pattern "src/**/*.ts" + +# Integration test suite +npx Codex-flow@alpha verify integration --test-suite full +``` + +#### Verification Criteria + +The verification system evaluates: + +1. **Code Correctness** + - Syntax validation + - Type checking (TypeScript) + - Logic flow analysis + - Error handling completeness + +2. **Best Practices** + - Code style adherence + - SOLID principles + - Design patterns usage + - Modularity and reusability + +3. **Security** + - Vulnerability scanning + - Secret detection + - Input validation + - Authentication/authorization checks + +4. **Performance** + - Algorithmic complexity + - Memory usage patterns + - Database query optimization + - Bundle size impact + +5. 
**Documentation** + - JSDoc/TypeDoc completeness + - README accuracy + - API documentation + - Code comments quality + +#### JSON Output for CI/CD + +```bash +# Get structured JSON output +npx Codex-flow@alpha verify check --json > verification.json + +# Example JSON structure: +{ + "overallScore": 0.947, + "passed": true, + "threshold": 0.95, + "checks": [ + { + "name": "code-correctness", + "score": 0.98, + "passed": true + }, + { + "name": "security", + "score": 0.91, + "passed": false, + "issues": [...] + } + ] +} +``` + +### Automatic Rollback + +#### Rollback Failed Changes + +Automatically revert changes that fail verification checks. + +**Basic Rollback:** +```bash +# Rollback to last known good state +npx Codex-flow@alpha verify rollback --last-good + +# Rollback to specific commit +npx Codex-flow@alpha verify rollback --to-commit abc123 + +# Interactive rollback with preview +npx Codex-flow@alpha verify rollback --interactive +``` + +**Smart Rollback:** +```bash +# Rollback only failed files (preserve good changes) +npx Codex-flow@alpha verify rollback --selective + +# Rollback with automatic backup +npx Codex-flow@alpha verify rollback --backup-first + +# Dry-run mode (preview without executing) +npx Codex-flow@alpha verify rollback --dry-run +``` + +**Rollback Performance:** +- Git-based rollback: <1 second +- Selective file rollback: <500ms +- Backup creation: Automatic before rollback + +### Verification Reports + +#### Generate Reports + +Create detailed verification reports with metrics and visualizations. 
+ +**Report Formats:** +```bash +# JSON report +npx Codex-flow@alpha verify report --format json + +# HTML report with charts +npx Codex-flow@alpha verify report --export metrics.html --format html + +# CSV for data analysis +npx Codex-flow@alpha verify report --format csv --export metrics.csv + +# Markdown summary +npx Codex-flow@alpha verify report --format markdown +``` + +**Time-based Reports:** +```bash +# Last 24 hours +npx Codex-flow@alpha verify report --period 24h + +# Last 7 days +npx Codex-flow@alpha verify report --period 7d + +# Last 30 days with trends +npx Codex-flow@alpha verify report --period 30d --include-trends + +# Custom date range +npx Codex-flow@alpha verify report --from 2025-01-01 --to 2025-01-31 +``` + +**Report Content:** +- Overall truth scores +- Per-agent performance metrics +- Task completion quality +- Verification pass/fail rates +- Rollback frequency +- Quality improvement trends +- Statistical confidence intervals + +### Interactive Dashboard + +#### Launch Dashboard + +Run interactive web-based verification dashboard with real-time updates. 
+ +```bash +# Launch dashboard on default port (3000) +npx Codex-flow@alpha verify dashboard + +# Custom port +npx Codex-flow@alpha verify dashboard --port 8080 + +# Export dashboard data +npx Codex-flow@alpha verify dashboard --export + +# Dashboard with auto-refresh +npx Codex-flow@alpha verify dashboard --refresh 5s +``` + +**Dashboard Features:** +- Real-time truth score updates (WebSocket) +- Interactive charts and graphs +- Agent performance comparison +- Task history timeline +- Rollback history viewer +- Export to PDF/HTML +- Filter by time period/agent/score + +### Configuration + +#### Default Configuration + +Set verification preferences in `.Codex-flow/config.json`: + +```json +{ + "verification": { + "threshold": 0.95, + "autoRollback": true, + "gitIntegration": true, + "hooks": { + "preCommit": true, + "preTask": true, + "postEdit": true + }, + "checks": { + "codeCorrectness": true, + "security": true, + "performance": true, + "documentation": true, + "bestPractices": true + } + }, + "truth": { + "defaultFormat": "table", + "defaultPeriod": "24h", + "warningThreshold": 0.85, + "criticalThreshold": 0.75, + "autoExport": { + "enabled": true, + "path": ".Codex-flow/metrics/truth-daily.json" + } + } +} +``` + +#### Threshold Configuration + +**Adjust verification strictness:** +```bash +# Strict mode (99% accuracy required) +npx Codex-flow@alpha verify check --threshold 0.99 + +# Lenient mode (90% acceptable) +npx Codex-flow@alpha verify check --threshold 0.90 + +# Set default threshold +npx Codex-flow@alpha config set verification.threshold 0.98 +``` + +**Per-environment thresholds:** +```json +{ + "verification": { + "thresholds": { + "production": 0.99, + "staging": 0.95, + "development": 0.90 + } + } +} +``` + +### Integration Examples + +#### CI/CD Integration + +**GitHub Actions:** +```yaml +name: Quality Verification + +on: [push, pull_request] + +jobs: + verify: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install 
Dependencies + run: npm install + + - name: Run Verification + run: | + npx Codex-flow@alpha verify check --json > verification.json + + - name: Check Truth Score + run: | + score=$(jq '.overallScore' verification.json) + if (( $(echo "$score < 0.95" | bc -l) )); then + echo "Truth score too low: $score" + exit 1 + fi + + - name: Upload Report + uses: actions/upload-artifact@v3 + with: + name: verification-report + path: verification.json +``` + +**GitLab CI:** +```yaml +verify: + stage: test + script: + - npx Codex-flow@alpha verify check --threshold 0.95 --json > verification.json + - | + score=$(jq '.overallScore' verification.json) + if [ $(echo "$score < 0.95" | bc) -eq 1 ]; then + echo "Verification failed with score: $score" + exit 1 + fi + artifacts: + paths: + - verification.json + reports: + junit: verification.json +``` + +#### Swarm Integration + +Run verification automatically during swarm operations: + +```bash +# Swarm with verification enabled +npx Codex-flow@alpha swarm --verify --threshold 0.98 + +# Hive Mind with auto-rollback +npx Codex-flow@alpha hive-mind --verify --rollback-on-fail + +# Training pipeline with verification +npx Codex-flow@alpha train --verify --threshold 0.99 +``` + +#### Pair Programming Integration + +Enable real-time verification during collaborative development: + +```bash +# Pair with verification +npx Codex-flow@alpha pair --verify --real-time + +# Pair with custom threshold +npx Codex-flow@alpha pair --verify --threshold 0.97 --auto-fix +``` + +### Advanced Workflows + +#### Continuous Verification + +Monitor codebase continuously during development: + +```bash +# Watch directory for changes +npx Codex-flow@alpha verify watch --directory src/ + +# Watch with auto-fix +npx Codex-flow@alpha verify watch --directory src/ --auto-fix + +# Watch with notifications +npx Codex-flow@alpha verify watch --notify --threshold 0.95 +``` + +#### Monitoring Integration + +Send metrics to external monitoring systems: + +```bash +# 
Export to Prometheus +npx Codex-flow@alpha truth --format json | \ + curl -X POST https://pushgateway.example.com/metrics/job/Codex-flow \ + -d @- + +# Send to DataDog +npx Codex-flow@alpha verify report --format json | \ + curl -X POST "https://api.datadoghq.com/api/v1/series?api_key=${DD_API_KEY}" \ + -H "Content-Type: application/json" \ + -d @- + +# Custom webhook +npx Codex-flow@alpha truth --format json | \ + curl -X POST https://metrics.example.com/api/truth \ + -H "Content-Type: application/json" \ + -d @- +``` + +#### Pre-commit Hooks + +Automatically verify before commits: + +```bash +# Install pre-commit hook +npx Codex-flow@alpha verify install-hook --pre-commit + +# .git/hooks/pre-commit example: +#!/bin/bash +npx Codex-flow@alpha verify check --threshold 0.95 --json > /tmp/verify.json + +score=$(jq '.overallScore' /tmp/verify.json) +if (( $(echo "$score < 0.95" | bc -l) )); then + echo "❌ Verification failed with score: $score" + echo "Run 'npx Codex-flow@alpha verify check --verbose' for details" + exit 1 +fi + +echo "✅ Verification passed with score: $score" +``` + +### Performance Metrics + +**Verification Speed:** +- Single file check: <100ms +- Directory scan: <500ms (per 100 files) +- Full codebase analysis: <5s (typical project) +- Truth score calculation: <50ms + +**Rollback Speed:** +- Git-based rollback: <1s +- Selective file rollback: <500ms +- Backup creation: <2s + +**Dashboard Performance:** +- Initial load: <1s +- Real-time updates: <100ms latency (WebSocket) +- Chart rendering: 60 FPS + +### Troubleshooting + +#### Common Issues + +**Low Truth Scores:** +```bash +# Get detailed breakdown +npx Codex-flow@alpha truth --verbose --threshold 0.0 + +# Check specific criteria +npx Codex-flow@alpha verify check --verbose + +# View agent-specific issues +npx Codex-flow@alpha truth --agent --format json +``` + +**Rollback Failures:** +```bash +# Check git status +git status + +# View rollback history +npx Codex-flow@alpha verify rollback 
--history + +# Manual rollback +git reset --hard HEAD~1 +``` + +**Verification Timeouts:** +```bash +# Increase timeout +npx Codex-flow@alpha verify check --timeout 60s + +# Verify in batches +npx Codex-flow@alpha verify batch --batch-size 10 +``` + +### Exit Codes + +Verification commands return standard exit codes: + +- `0`: Verification passed (score ≥ threshold) +- `1`: Verification failed (score < threshold) +- `2`: Error during verification (invalid input, system error) + +### Related Commands + +- `npx Codex-flow@alpha pair` - Collaborative development with verification +- `npx Codex-flow@alpha train` - Training with verification feedback +- `npx Codex-flow@alpha swarm` - Multi-agent coordination with quality checks +- `npx Codex-flow@alpha report` - Generate comprehensive project reports + +### Best Practices + +1. **Set Appropriate Thresholds**: Use 0.99 for critical code, 0.95 for standard, 0.90 for experimental +2. **Enable Auto-rollback**: Prevent bad code from persisting +3. **Monitor Trends**: Track improvement over time, not just current scores +4. **Integrate with CI/CD**: Make verification part of your pipeline +5. **Use Watch Mode**: Get immediate feedback during development +6. **Export Metrics**: Track quality metrics in your monitoring system +7. **Review Rollbacks**: Understand why changes were rejected +8. 
**Train Agents**: Use verification feedback to improve agent performance + +### Additional Resources + +- Truth Scoring Algorithm: See `/docs/truth-scoring.md` +- Verification Criteria: See `/docs/verification-criteria.md` +- Integration Examples: See `/examples/verification/` +- API Reference: See `/docs/api/verification.md` diff --git a/apis/admin/group.api b/apis/admin/group.api new file mode 100644 index 0000000..de4aad9 --- /dev/null +++ b/apis/admin/group.api @@ -0,0 +1,215 @@ +syntax = "v1" + +info ( + title: "Group API" + desc: "API for user group and node group management" + author: "Tension" + email: "tension@ppanel.com" + version: "0.0.1" +) + +import ( + "../types.api" + "./server.api" +) + +type ( + // ===== 节点组管理 ===== + // GetNodeGroupListRequest + GetNodeGroupListRequest { + Page int `form:"page"` + Size int `form:"size"` + GroupId string `form:"group_id,omitempty"` + } + // GetNodeGroupListResponse + GetNodeGroupListResponse { + Total int64 `json:"total"` + List []NodeGroup `json:"list"` + } + // CreateNodeGroupRequest + CreateNodeGroupRequest { + Name string `json:"name" validate:"required"` + Description string `json:"description"` + Sort int `json:"sort"` + ForCalculation *bool `json:"for_calculation"` + IsExpiredGroup *bool `json:"is_expired_group"` + ExpiredDaysLimit *int `json:"expired_days_limit"` + MaxTrafficGBExpired *int64 `json:"max_traffic_gb_expired,omitempty"` + SpeedLimit *int `json:"speed_limit"` + MinTrafficGB *int64 `json:"min_traffic_gb,omitempty"` + MaxTrafficGB *int64 `json:"max_traffic_gb,omitempty"` + } + // UpdateNodeGroupRequest + UpdateNodeGroupRequest { + Id int64 `json:"id" validate:"required"` + Name string `json:"name"` + Description string `json:"description"` + Sort int `json:"sort"` + ForCalculation *bool `json:"for_calculation"` + IsExpiredGroup *bool `json:"is_expired_group"` + ExpiredDaysLimit *int `json:"expired_days_limit"` + MaxTrafficGBExpired *int64 `json:"max_traffic_gb_expired,omitempty"` + SpeedLimit *int 
`json:"speed_limit"` + MinTrafficGB *int64 `json:"min_traffic_gb,omitempty"` + MaxTrafficGB *int64 `json:"max_traffic_gb,omitempty"` + } + // DeleteNodeGroupRequest + DeleteNodeGroupRequest { + Id int64 `json:"id" validate:"required"` + } + // ===== 分组配置管理 ===== + // GetGroupConfigRequest + GetGroupConfigRequest { + Keys []string `form:"keys,omitempty"` + } + // GetGroupConfigResponse + GetGroupConfigResponse { + Enabled bool `json:"enabled"` + Mode string `json:"mode"` + Config map[string]interface{} `json:"config"` + State RecalculationState `json:"state"` + } + // UpdateGroupConfigRequest + UpdateGroupConfigRequest { + Enabled bool `json:"enabled"` + Mode string `json:"mode"` + Config map[string]interface{} `json:"config"` + } + // RecalculationState + RecalculationState { + State string `json:"state"` + Progress int `json:"progress"` + Total int `json:"total"` + } + // ===== 分组操作 ===== + // RecalculateGroupRequest + RecalculateGroupRequest { + Mode string `json:"mode" validate:"required"` + TriggerType string `json:"trigger_type"` // "manual" or "scheduled" + } + // GetGroupHistoryRequest + GetGroupHistoryRequest { + Page int `form:"page"` + Size int `form:"size"` + GroupMode string `form:"group_mode,omitempty"` + TriggerType string `form:"trigger_type,omitempty"` + } + // GetGroupHistoryResponse + GetGroupHistoryResponse { + Total int64 `json:"total"` + List []GroupHistory `json:"list"` + } + // GetGroupHistoryDetailRequest + GetGroupHistoryDetailRequest { + Id int64 `form:"id" validate:"required"` + } + // GetGroupHistoryDetailResponse + GetGroupHistoryDetailResponse { + GroupHistoryDetail + } + // PreviewUserNodesRequest + PreviewUserNodesRequest { + UserId int64 `form:"user_id" validate:"required"` + } + // PreviewUserNodesResponse + PreviewUserNodesResponse { + UserId int64 `json:"user_id"` + NodeGroups []NodeGroupItem `json:"node_groups"` + } + // NodeGroupItem + NodeGroupItem { + Id int64 `json:"id"` + Name string `json:"name"` + Nodes []Node 
`json:"nodes"` + } + // ExportGroupResultRequest + ExportGroupResultRequest { + HistoryId *int64 `form:"history_id,omitempty"` + } + // ===== Reset Groups ===== + // ResetGroupsRequest + ResetGroupsRequest { + Confirm bool `json:"confirm" validate:"required"` + } + // ===== 套餐分组映射 ===== + // SubscribeGroupMappingItem + SubscribeGroupMappingItem { + SubscribeName string `json:"subscribe_name"` + NodeGroupName string `json:"node_group_name"` + } + // GetSubscribeGroupMappingRequest + GetSubscribeGroupMappingRequest {} + // GetSubscribeGroupMappingResponse + GetSubscribeGroupMappingResponse { + List []SubscribeGroupMappingItem `json:"list"` + } +) + +@server ( + prefix: v1/admin/group + group: admin/group + jwt: JwtAuth + middleware: AuthMiddleware +) +service ppanel { + // ===== 节点组管理 ===== + @doc "Get node group list" + @handler GetNodeGroupList + get /node/list (GetNodeGroupListRequest) returns (GetNodeGroupListResponse) + + @doc "Create node group" + @handler CreateNodeGroup + post /node (CreateNodeGroupRequest) + + @doc "Update node group" + @handler UpdateNodeGroup + put /node (UpdateNodeGroupRequest) + + @doc "Delete node group" + @handler DeleteNodeGroup + delete /node (DeleteNodeGroupRequest) + + // ===== 分组配置管理 ===== + @doc "Get group config" + @handler GetGroupConfig + get /config (GetGroupConfigRequest) returns (GetGroupConfigResponse) + + @doc "Update group config" + @handler UpdateGroupConfig + put /config (UpdateGroupConfigRequest) + + // ===== 分组操作 ===== + @doc "Recalculate group" + @handler RecalculateGroup + post /recalculate (RecalculateGroupRequest) + + @doc "Get recalculation status" + @handler GetRecalculationStatus + get /recalculation/status returns (RecalculationState) + + @doc "Get group history" + @handler GetGroupHistory + get /history (GetGroupHistoryRequest) returns (GetGroupHistoryResponse) + + @doc "Export group result" + @handler ExportGroupResult + get /export (ExportGroupResultRequest) + + // Routes with query parameters + @doc "Get 
group history detail" + @handler GetGroupHistoryDetail + get /history/detail (GetGroupHistoryDetailRequest) returns (GetGroupHistoryDetailResponse) + + @doc "Preview user nodes" + @handler PreviewUserNodes + get /preview (PreviewUserNodesRequest) returns (PreviewUserNodesResponse) + + @doc "Reset all groups" + @handler ResetGroups + post /reset (ResetGroupsRequest) + + @doc "Get subscribe group mapping" + @handler GetSubscribeGroupMapping + get /subscribe/mapping (GetSubscribeGroupMappingRequest) returns (GetSubscribeGroupMappingResponse) +} + diff --git a/apis/admin/marketing.api b/apis/admin/marketing.api index 8014c0d..5a18fb8 100644 --- a/apis/admin/marketing.api +++ b/apis/admin/marketing.api @@ -163,5 +163,9 @@ service ppanel { @doc "Query quota task list" @handler QueryQuotaTaskList get /quota/list (QueryQuotaTaskListRequest) returns (QueryQuotaTaskListResponse) + + @doc "Query quota task status" + @handler QueryQuotaTaskStatus + post /quota/status (QueryQuotaTaskStatusRequest) returns (QueryQuotaTaskStatusResponse) } diff --git a/apis/admin/server.api b/apis/admin/server.api index 87d1f7e..5f5479d 100644 --- a/apis/admin/server.api +++ b/apis/admin/server.api @@ -80,36 +80,40 @@ type ( Protocols []Protocol `json:"protocols"` } Node { - Id int64 `json:"id"` - Name string `json:"name"` - Tags []string `json:"tags"` - Port uint16 `json:"port"` - Address string `json:"address"` - ServerId int64 `json:"server_id"` - Protocol string `json:"protocol"` - Enabled *bool `json:"enabled"` - Sort int `json:"sort,omitempty"` - CreatedAt int64 `json:"created_at"` - UpdatedAt int64 `json:"updated_at"` + Id int64 `json:"id"` + Name string `json:"name"` + Tags []string `json:"tags"` + Port uint16 `json:"port"` + Address string `json:"address"` + ServerId int64 `json:"server_id"` + Protocol string `json:"protocol"` + Enabled *bool `json:"enabled"` + Sort int `json:"sort,omitempty"` + NodeGroupId int64 `json:"node_group_id,omitempty"` + NodeGroupIds []int64 
`json:"node_group_ids,omitempty"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` } CreateNodeRequest { - Name string `json:"name"` - Tags []string `json:"tags,omitempty"` - Port uint16 `json:"port"` - Address string `json:"address"` - ServerId int64 `json:"server_id"` - Protocol string `json:"protocol"` - Enabled *bool `json:"enabled"` + Name string `json:"name"` + Tags []string `json:"tags,omitempty"` + Port uint16 `json:"port"` + Address string `json:"address"` + ServerId int64 `json:"server_id"` + Protocol string `json:"protocol"` + Enabled *bool `json:"enabled"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` } UpdateNodeRequest { - Id int64 `json:"id"` - Name string `json:"name"` - Tags []string `json:"tags,omitempty"` - Port uint16 `json:"port"` - Address string `json:"address"` - ServerId int64 `json:"server_id"` - Protocol string `json:"protocol"` - Enabled *bool `json:"enabled"` + Id int64 `json:"id"` + Name string `json:"name"` + Tags []string `json:"tags,omitempty"` + Port uint16 `json:"port"` + Address string `json:"address"` + ServerId int64 `json:"server_id"` + Protocol string `json:"protocol"` + Enabled *bool `json:"enabled"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` } ToggleNodeStatusRequest { Id int64 `json:"id"` @@ -119,9 +123,10 @@ type ( Id int64 `json:"id"` } FilterNodeListRequest { - Page int `form:"page"` - Size int `form:"size"` - Search string `form:"search,omitempty"` + Page int `form:"page"` + Size int `form:"size"` + Search string `form:"search,omitempty"` + NodeGroupId *int64 `form:"node_group_id,omitempty"` } FilterNodeListResponse { Total int64 `json:"total"` diff --git a/apis/admin/subscribe.api b/apis/admin/subscribe.api index a832b3a..8a662b8 100644 --- a/apis/admin/subscribe.api +++ b/apis/admin/subscribe.api @@ -48,6 +48,9 @@ type ( Quota int64 `json:"quota"` Nodes []int64 `json:"nodes"` NodeTags []string `json:"node_tags"` + NodeGroupIds []int64 
`json:"node_group_ids,omitempty"` + NodeGroupId int64 `json:"node_group_id"` + TrafficLimit []TrafficLimit `json:"traffic_limit"` Show *bool `json:"show"` Sell *bool `json:"sell"` DeductionRatio int64 `json:"deduction_ratio"` @@ -55,6 +58,7 @@ type ( ResetCycle int64 `json:"reset_cycle"` RenewalReset *bool `json:"renewal_reset"` ShowOriginalPrice bool `json:"show_original_price"` + AutoCreateGroup bool `json:"auto_create_group"` } UpdateSubscribeRequest { Id int64 `json:"id" validate:"required"` @@ -72,6 +76,9 @@ type ( Quota int64 `json:"quota"` Nodes []int64 `json:"nodes"` NodeTags []string `json:"node_tags"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` + NodeGroupId int64 `json:"node_group_id"` + TrafficLimit []TrafficLimit `json:"traffic_limit"` Show *bool `json:"show"` Sell *bool `json:"sell"` Sort int64 `json:"sort"` @@ -85,10 +92,11 @@ type ( Sort []SortItem `json:"sort"` } GetSubscribeListRequest { - Page int64 `form:"page" validate:"required"` - Size int64 `form:"size" validate:"required"` - Language string `form:"language,omitempty"` - Search string `form:"search,omitempty"` + Page int64 `form:"page" validate:"required"` + Size int64 `form:"size" validate:"required"` + Language string `form:"language,omitempty"` + Search string `form:"search,omitempty"` + NodeGroupId int64 `form:"node_group_id,omitempty"` } SubscribeItem { Subscribe diff --git a/apis/admin/user.api b/apis/admin/user.api index b2822c6..889a1cc 100644 --- a/apis/admin/user.api +++ b/apis/admin/user.api @@ -77,22 +77,27 @@ type ( IsAdmin bool `json:"is_admin"` } UserSubscribeDetail { - Id int64 `json:"id"` - UserId int64 `json:"user_id"` - User User `json:"user"` - OrderId int64 `json:"order_id"` - SubscribeId int64 `json:"subscribe_id"` - Subscribe Subscribe `json:"subscribe"` - StartTime int64 `json:"start_time"` - ExpireTime int64 `json:"expire_time"` - ResetTime int64 `json:"reset_time"` - Traffic int64 `json:"traffic"` - Download int64 `json:"download"` - Upload int64 
`json:"upload"` - Token string `json:"token"` - Status uint8 `json:"status"` - CreatedAt int64 `json:"created_at"` - UpdatedAt int64 `json:"updated_at"` + Id int64 `json:"id"` + UserId int64 `json:"user_id"` + User User `json:"user"` + OrderId int64 `json:"order_id"` + SubscribeId int64 `json:"subscribe_id"` + Subscribe Subscribe `json:"subscribe"` + NodeGroupId int64 `json:"node_group_id"` + GroupLocked bool `json:"group_locked"` + StartTime int64 `json:"start_time"` + ExpireTime int64 `json:"expire_time"` + ResetTime int64 `json:"reset_time"` + Traffic int64 `json:"traffic"` + Download int64 `json:"download"` + Upload int64 `json:"upload"` + Token string `json:"token"` + Status uint8 `json:"status"` + EffectiveSpeed int64 `json:"effective_speed"` + IsThrottled bool `json:"is_throttled"` + ThrottleRule string `json:"throttle_rule,omitempty"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` } BatchDeleteUserRequest { Ids []int64 `json:"ids" validate:"required"` diff --git a/apis/auth/auth.api b/apis/auth/auth.api index 84f0f02..c257e04 100644 --- a/apis/auth/auth.api +++ b/apis/auth/auth.api @@ -11,13 +11,16 @@ info ( type ( // User login request UserLoginRequest { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Password string `json:"password" validate:"required"` - IP string `header:"X-Original-Forwarded-For"` - UserAgent string `header:"User-Agent"` - LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + Identifier string `json:"identifier"` + Email string `json:"email" validate:"required"` + Password string `json:"password" validate:"required"` + IP string `header:"X-Original-Forwarded-For"` + UserAgent string `header:"User-Agent"` + LoginType string `header:"Login-Type"` + CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string 
`json:"slider_token,optional"` } // Check user is exist request CheckUserRequest { @@ -29,37 +32,43 @@ type ( } // User login response UserRegisterRequest { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Password string `json:"password" validate:"required"` - Invite string `json:"invite,optional"` - Code string `json:"code,optional"` - IP string `header:"X-Original-Forwarded-For"` - UserAgent string `header:"User-Agent"` - LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + Identifier string `json:"identifier"` + Email string `json:"email" validate:"required"` + Password string `json:"password" validate:"required"` + Invite string `json:"invite,optional"` + Code string `json:"code,optional"` + IP string `header:"X-Original-Forwarded-For"` + UserAgent string `header:"User-Agent"` + LoginType string `header:"Login-Type"` + CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } - // User login response + // User reset password request ResetPasswordRequest { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Password string `json:"password" validate:"required"` - Code string `json:"code,optional"` - IP string `header:"X-Original-Forwarded-For"` - UserAgent string `header:"User-Agent"` - LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + Identifier string `json:"identifier"` + Email string `json:"email" validate:"required"` + Password string `json:"password" validate:"required"` + Code string `json:"code,optional"` + IP string `header:"X-Original-Forwarded-For"` + UserAgent string `header:"User-Agent"` + LoginType string `header:"Login-Type"` + CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string 
`json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } // Email login request EmailLoginRequest { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Code string `json:"code" validate:"required"` - Invite string `json:"invite,optional"` + Identifier string `json:"identifier" form:"identifier"` + Email string `json:"email" form:"email" validate:"required,email"` + Code string `json:"code" form:"code" validate:"required"` + Invite string `json:"invite,optional" form:"invite"` IP string `header:"X-Original-Forwarded-For"` UserAgent string `header:"User-Agent"` LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + CfToken string `json:"cf_token,optional" form:"cf_token"` } LoginResponse { Token string `json:"token"` @@ -86,6 +95,9 @@ type ( UserAgent string `header:"User-Agent"` LoginType string `header:"Login-Type"` CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } // Check user is exist request TelephoneCheckUserRequest { @@ -108,6 +120,9 @@ type ( UserAgent string `header:"User-Agent"` LoginType string `header:"Login-Type,optional"` CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } // User login response TelephoneResetPasswordRequest { @@ -120,6 +135,9 @@ type ( UserAgent string `header:"User-Agent"` LoginType string `header:"Login-Type,optional"` CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } AppleLoginCallbackRequest { Code string `form:"code"` @@ -137,6 +155,21 @@ type ( CfToken string `json:"cf_token,optional"` ShortCode string 
`json:"short_code,optional"` } + GenerateCaptchaResponse { + Id string `json:"id"` + Image string `json:"image"` + Type string `json:"type"` + BlockImage string `json:"block_image,omitempty"` + } + SliderVerifyCaptchaRequest { + Id string `json:"id" validate:"required"` + X int `json:"x" validate:"required"` + Y int `json:"y" validate:"required"` + Trail string `json:"trail"` + } + SliderVerifyCaptchaResponse { + Token string `json:"token"` + } ) @server ( @@ -174,16 +207,47 @@ service ppanel { get /check/telephone (TelephoneCheckUserRequest) returns (TelephoneCheckUserResponse) @doc "User Telephone register" - @handler TelephoneUserRegister + @handler TelephoneRegister post /register/telephone (TelephoneRegisterRequest) returns (LoginResponse) - @doc "Reset password" + @doc "Reset password by telephone" @handler TelephoneResetPassword post /reset/telephone (TelephoneResetPasswordRequest) returns (LoginResponse) @doc "Device Login" @handler DeviceLogin post /login/device (DeviceLoginRequest) returns (LoginResponse) + + @doc "Generate captcha" + @handler GenerateCaptcha + post /captcha/generate returns (GenerateCaptchaResponse) + + @doc "Verify slider captcha" + @handler SliderVerifyCaptcha + post /captcha/slider/verify (SliderVerifyCaptchaRequest) returns (SliderVerifyCaptchaResponse) +} + +@server ( + prefix: v1/auth/admin + group: auth/admin + middleware: DeviceMiddleware +) +service ppanel { + @doc "Admin login" + @handler AdminLogin + post /login (UserLoginRequest) returns (LoginResponse) + + @doc "Admin reset password" + @handler AdminResetPassword + post /reset (ResetPasswordRequest) returns (LoginResponse) + + @doc "Generate captcha" + @handler AdminGenerateCaptcha + post /captcha/generate returns (GenerateCaptchaResponse) + + @doc "Verify slider captcha" + @handler AdminSliderVerifyCaptcha + post /captcha/slider/verify (SliderVerifyCaptchaRequest) returns (SliderVerifyCaptchaResponse) } @server ( @@ -203,4 +267,3 @@ service ppanel { @handler 
AppleLoginCallback post /callback/apple (AppleLoginCallbackRequest) } - diff --git a/apis/common.api b/apis/common.api index d264b43..3702523 100644 --- a/apis/common.api +++ b/apis/common.api @@ -12,10 +12,12 @@ import "./types.api" type ( VeifyConfig { - TurnstileSiteKey string `json:"turnstile_site_key"` - EnableLoginVerify bool `json:"enable_login_verify"` - EnableRegisterVerify bool `json:"enable_register_verify"` - EnableResetPasswordVerify bool `json:"enable_reset_password_verify"` + CaptchaType string `json:"captcha_type"` + TurnstileSiteKey string `json:"turnstile_site_key"` + EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` + EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` + EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` + EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"` } GetGlobalConfigResponse { Site SiteConfig `json:"site"` @@ -94,6 +96,48 @@ type ( Message string `json:"message,omitempty"` Timestamp int64 `json:"timestamp,omitempty"` } + GetDownloadLinkRequest { + InviteCode string `form:"invite_code,optional"` + Platform string `form:"platform" validate:"required,oneof=windows mac ios android"` + } + GetDownloadLinkResponse { + Url string `json:"url"` + } + ContactRequest { + Name string `json:"name" validate:"required,max=100"` + Email string `json:"email" validate:"required,email"` + OtherContact string `json:"other_contact" validate:"max=200"` + Notes string `json:"notes" validate:"max=2000"` + } + ReportLogMessageRequest { + Platform string `json:"platform" validate:"required,max=32"` + AppVersion string `json:"app_version" validate:"required,max=64"` + OsName string `json:"os_name" validate:"max=64"` + OsVersion string `json:"os_version" validate:"max=64"` + DeviceId string `json:"device_id" validate:"required,max=255"` + UserId int64 `json:"user_id"` + SessionId string `json:"session_id" validate:"max=255"` + Level uint8 `json:"level"` + ErrorCode string 
`json:"error_code" validate:"max=128"` + Message string `json:"message" validate:"required,max=65535"` + Stack string `json:"stack" validate:"max=1048576"` + Context interface{} `json:"context"` + OccurredAt int64 `json:"occurred_at"` + } + ReportLogMessageResponse { + Id int64 `json:"id"` + } + LegacyCheckVerificationCodeRequest { + Method string `json:"method" form:"method" validate:"omitempty,oneof=email mobile"` + Account string `json:"account" form:"account"` + Email string `json:"email" form:"email"` + Code string `json:"code" form:"code" validate:"required"` + Type uint8 `json:"type" form:"type" validate:"required"` + } + LegacyCheckVerificationCodeResponse { + Status bool `json:"status"` + Exist bool `json:"exist"` + } ) @server ( @@ -141,5 +185,25 @@ service ppanel { @doc "Heartbeat" @handler Heartbeat get /heartbeat returns (HeartbeatResponse) + + @doc "Get Download Link" + @handler GetDownloadLink + get /client/download (GetDownloadLinkRequest) returns (GetDownloadLinkResponse) + + @doc "Submit Contact" + @handler SubmitContact + post /contact (ContactRequest) + + @doc "Report log message" + @handler ReportLogMessage + post /log/report (ReportLogMessageRequest) returns (ReportLogMessageResponse) + + @doc "Check verification code (legacy v1)" + @handler CheckCodeLegacy + post /check_code (LegacyCheckVerificationCodeRequest) returns (LegacyCheckVerificationCodeResponse) + + @doc "Check verification code (legacy v2, consume code)" + @handler CheckCodeLegacyV2 + post /check_code/v2 (LegacyCheckVerificationCodeRequest) returns (LegacyCheckVerificationCodeResponse) } diff --git a/apis/public/subscribe.api b/apis/public/subscribe.api index 445379b..9abba4f 100644 --- a/apis/public/subscribe.api +++ b/apis/public/subscribe.api @@ -71,5 +71,9 @@ service ppanel { @doc "Get user subscribe node info" @handler QueryUserSubscribeNodeList get /node/list returns (QueryUserSubscribeNodeListResponse) + + @doc "Get subscribe group list" + @handler QuerySubscribeGroupList + 
get /group/list returns (QuerySubscribeGroupListResponse) } diff --git a/apis/public/user.api b/apis/public/user.api index 498bbcb..e55919f 100644 --- a/apis/public/user.api +++ b/apis/public/user.api @@ -220,6 +220,22 @@ type ( FriendlyCount int64 `json:"friendly_count"` HistoryCount int64 `json:"history_count"` } + GetUserTrafficStatsRequest { + UserSubscribeId string `form:"user_subscribe_id" validate:"required"` + Days int `form:"days" validate:"required,oneof=7 30"` + } + DailyTrafficStats { + Date string `json:"date"` + Upload int64 `json:"upload"` + Download int64 `json:"download"` + Total int64 `json:"total"` + } + GetUserTrafficStatsResponse { + List []DailyTrafficStats `json:"list"` + TotalUpload int64 `json:"total_upload"` + TotalDownload int64 `json:"total_download"` + TotalTraffic int64 `json:"total_traffic"` + } ) @server ( @@ -374,11 +390,15 @@ service ppanel { @doc "Get Subscribe Status" @handler GetSubscribeStatus - get /subscribe_status (GetSubscribeStatusRequest) returns (GetSubscribeStatusResponse) + post /subscribe_status (GetSubscribeStatusRequest) returns (GetSubscribeStatusResponse) @doc "Get User Invite Stats" @handler GetUserInviteStats get /invite_stats (GetUserInviteStatsRequest) returns (GetUserInviteStatsResponse) + + @doc "Get User Traffic Statistics" + @handler GetUserTrafficStats + get /traffic_stats (GetUserTrafficStatsRequest) returns (GetUserTrafficStatsResponse) } @server ( diff --git a/apis/types.api b/apis/types.api index 53e4b3f..638415a 100644 --- a/apis/types.api +++ b/apis/types.api @@ -170,11 +170,13 @@ type ( DeviceLimit int64 `json:"device_limit"` } VerifyConfig { - TurnstileSiteKey string `json:"turnstile_site_key"` - TurnstileSecret string `json:"turnstile_secret"` - EnableLoginVerify bool `json:"enable_login_verify"` - EnableRegisterVerify bool `json:"enable_register_verify"` - EnableResetPasswordVerify bool `json:"enable_reset_password_verify"` + CaptchaType string `json:"captcha_type"` // local or turnstile + 
TurnstileSiteKey string `json:"turnstile_site_key"` + TurnstileSecret string `json:"turnstile_secret"` + EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` // User login captcha + EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` // User register captcha + EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` // Admin login captcha + EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"` // User reset password captcha } NodeConfig { NodeSecret string `json:"node_secret"` @@ -226,6 +228,12 @@ type ( Quantity int64 `json:"quantity"` Discount float64 `json:"discount"` } + TrafficLimit { + StatType string `json:"stat_type"` + StatValue int64 `json:"stat_value"` + TrafficUsage int64 `json:"traffic_usage"` + SpeedLimit int64 `json:"speed_limit"` + } Subscribe { Id int64 `json:"id"` Name string `json:"name"` @@ -243,6 +251,9 @@ type ( Quota int64 `json:"quota"` Nodes []int64 `json:"nodes"` NodeTags []string `json:"node_tags"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` + NodeGroupId int64 `json:"node_group_id"` + TrafficLimit []TrafficLimit `json:"traffic_limit"` Show bool `json:"show"` Sell bool `json:"sell"` Sort int64 `json:"sort"` @@ -556,6 +567,7 @@ type ( FamilyId int64 `json:"family_id"` OwnerUserId int64 `json:"owner_user_id"` OwnerIdentifier string `json:"owner_identifier"` + OwnerAuthType string `json:"owner_auth_type"` Status string `json:"status"` ActiveMemberCount int64 `json:"active_member_count"` MaxMembers int64 `json:"max_members"` @@ -565,7 +577,9 @@ type ( FamilyMemberItem { UserId int64 `json:"user_id"` Identifier string `json:"identifier"` + AuthType string `json:"auth_type"` DeviceNo string `json:"device_no"` + DeviceType string `json:"device_type"` Role uint8 `json:"role"` RoleName string `json:"role_name"` Status uint8 `json:"status"` @@ -951,5 +965,42 @@ type ( ResetUserSubscribeTokenRequest { UserSubscribeId int64 `json:"user_subscribe_id"` } + // ===== 分组功能类型定义 ===== 
+ // NodeGroup 节点组 + NodeGroup { + Id int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Sort int `json:"sort"` + ForCalculation bool `json:"for_calculation"` + IsExpiredGroup bool `json:"is_expired_group"` + ExpiredDaysLimit int `json:"expired_days_limit"` + MaxTrafficGBExpired int64 `json:"max_traffic_gb_expired,omitempty"` + SpeedLimit int `json:"speed_limit"` + MinTrafficGB int64 `json:"min_traffic_gb,omitempty"` + MaxTrafficGB int64 `json:"max_traffic_gb,omitempty"` + NodeCount int64 `json:"node_count,omitempty"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` + } + // GroupHistory 分组历史记录 + GroupHistory { + Id int64 `json:"id"` + GroupMode string `json:"group_mode"` + TriggerType string `json:"trigger_type"` + TotalUsers int `json:"total_users"` + SuccessCount int `json:"success_count"` + FailedCount int `json:"failed_count"` + StartTime *int64 `json:"start_time,omitempty"` + EndTime *int64 `json:"end_time,omitempty"` + Operator string `json:"operator,omitempty"` + ErrorLog string `json:"error_log,omitempty"` + CreatedAt int64 `json:"created_at"` + } + // GroupHistoryDetail 分组历史详情 + GroupHistoryDetail { + GroupHistory + ConfigSnapshot map[string]interface{} `json:"config_snapshot,omitempty"` + } ) diff --git a/etc/ppanel.yaml b/etc/ppanel.yaml index 0ecc9bd..d19d1f7 100644 --- a/etc/ppanel.yaml +++ b/etc/ppanel.yaml @@ -15,10 +15,10 @@ Logger: # 日志配置 Level: debug # 日志级别: debug, info, warn, error, panic, fatal MySQL: - Addr: 103.150.215.44:3306 # host 网络模式; bridge 模式改为 mysql:3306 + Addr: 154.12.35.103:3306 # host 网络模式; bridge 模式改为 mysql:3306 Username: root # MySQL用户名 Password: jpcV41ppanel # MySQL密码,与 .env MYSQL_ROOT_PASSWORD 一致 - Dbname: hifast # MySQL数据库名 + Dbname: ppanel # MySQL数据库名 Config: charset=utf8mb4&parseTime=true&loc=Asia%2FShanghai MaxIdleConns: 10 MaxOpenConns: 100 diff --git a/go.mod b/go.mod index 88d8ca4..222d463 100644 --- a/go.mod +++ b/go.mod @@ -94,6 +94,7 @@ require ( 
github.com/gin-contrib/sse v1.0.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golang/glog v1.2.0 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -118,6 +119,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mojocn/base64Captcha v1.3.8 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/oschwald/maxminddb-golang v1.13.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect @@ -140,6 +142,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.13.0 // indirect golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect + golang.org/x/image v0.23.0 // indirect golang.org/x/net v0.34.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/text v0.22.0 // indirect diff --git a/go.sum b/go.sum index bec641c..ad1c08e 100644 --- a/go.sum +++ b/go.sum @@ -159,6 +159,8 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8= github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog 
v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= @@ -274,6 +276,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mojocn/base64Captcha v1.3.8 h1:rrN9BhCwXKS8ht1e21kvR3iTaMgf4qPC9sRoV52bqEg= +github.com/mojocn/base64Captcha v1.3.8/go.mod h1:QFZy927L8HVP3+VV5z2b1EAEiv1KxVJKZbAucVgLUy4= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -405,12 +409,17 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d 
h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4= golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68= +golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -419,6 +428,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -434,7 +446,10 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod 
h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -448,6 +463,10 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -466,14 +485,21 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -481,7 +507,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -499,6 +528,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/initialize/migrate/database/02142_add_groups.down.sql b/initialize/migrate/database/02142_add_groups.down.sql new file mode 100644 index 0000000..6765acc --- /dev/null +++ b/initialize/migrate/database/02142_add_groups.down.sql @@ -0,0 +1,28 @@ +-- Purpose: Rollback node group management tables +-- Author: Tension +-- Date: 2025-02-23 +-- Updated: 2025-03-06 + +-- ===== Remove system configuration entries ===== +DELETE FROM `system` WHERE `category` = 'group' AND `key` IN ('enabled', 'mode', 'auto_create_group'); + +-- ===== Remove columns and indexes from subscribe table ===== +ALTER TABLE `subscribe` DROP INDEX IF EXISTS 
`idx_node_group_id`; +ALTER TABLE `subscribe` DROP COLUMN IF EXISTS `node_group_id`; +ALTER TABLE `subscribe` DROP COLUMN IF EXISTS `node_group_ids`; + +-- ===== Remove columns and indexes from user_subscribe table ===== +ALTER TABLE `user_subscribe` DROP INDEX IF EXISTS `idx_node_group_id`; +ALTER TABLE `user_subscribe` DROP COLUMN IF EXISTS `node_group_id`; + +-- ===== Remove columns and indexes from nodes table ===== +ALTER TABLE `nodes` DROP COLUMN IF EXISTS `node_group_ids`; + +-- ===== Drop group_history_detail table ===== +DROP TABLE IF EXISTS `group_history_detail`; + +-- ===== Drop group_history table ===== +DROP TABLE IF EXISTS `group_history`; + +-- ===== Drop node_group table ===== +DROP TABLE IF EXISTS `node_group`; diff --git a/initialize/migrate/database/02142_add_groups.up.sql b/initialize/migrate/database/02142_add_groups.up.sql new file mode 100644 index 0000000..a4150ce --- /dev/null +++ b/initialize/migrate/database/02142_add_groups.up.sql @@ -0,0 +1,130 @@ +-- Purpose: Add node group management tables with multi-group support +-- Author: Tension +-- Date: 2025-02-23 +-- Updated: 2025-03-06 + +-- ===== Create node_group table ===== +DROP TABLE IF EXISTS `node_group`; +CREATE TABLE IF NOT EXISTS `node_group` ( + `id` bigint NOT NULL AUTO_INCREMENT, + `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'Name', + `description` varchar(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'Group Description', + `sort` int NOT NULL DEFAULT '0' COMMENT 'Sort Order', + `for_calculation` tinyint(1) NOT NULL DEFAULT 1 COMMENT 'For Grouping Calculation: 0=false, 1=true', + `min_traffic_gb` bigint DEFAULT 0 COMMENT 'Minimum Traffic (GB) for this node group', + `max_traffic_gb` bigint DEFAULT 0 COMMENT 'Maximum Traffic (GB) for this node group', + `created_at` datetime(3) DEFAULT NULL COMMENT 'Create Time', + `updated_at` datetime(3) DEFAULT NULL COMMENT 'Update Time', + PRIMARY KEY (`id`), + 
KEY `idx_sort` (`sort`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Node Groups'; + +-- ===== Create group_history table ===== +DROP TABLE IF EXISTS `group_history`; +CREATE TABLE IF NOT EXISTS `group_history` ( + `id` bigint NOT NULL AUTO_INCREMENT, + `group_mode` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'Group Mode: average/subscribe/traffic', + `trigger_type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'Trigger Type: manual/auto/schedule', + `state` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT 'State: pending/running/completed/failed', + `total_users` int NOT NULL DEFAULT '0' COMMENT 'Total Users', + `success_count` int NOT NULL DEFAULT '0' COMMENT 'Success Count', + `failed_count` int NOT NULL DEFAULT '0' COMMENT 'Failed Count', + `start_time` datetime(3) DEFAULT NULL COMMENT 'Start Time', + `end_time` datetime(3) DEFAULT NULL COMMENT 'End Time', + `operator` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'Operator', + `error_message` text CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci COMMENT 'Error Message', + `created_at` datetime(3) DEFAULT NULL COMMENT 'Create Time', + PRIMARY KEY (`id`), + KEY `idx_group_mode` (`group_mode`), + KEY `idx_trigger_type` (`trigger_type`), + KEY `idx_state` (`state`), + KEY `idx_created_at` (`created_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Group Calculation History'; + +-- ===== Create group_history_detail table ===== +-- Note: user_group_id column removed, using user_data JSON field instead +DROP TABLE IF EXISTS `group_history_detail`; +CREATE TABLE IF NOT EXISTS `group_history_detail` ( + `id` bigint NOT NULL AUTO_INCREMENT, + `history_id` bigint NOT NULL COMMENT 'History ID', + `node_group_id` bigint NOT NULL COMMENT 'Node Group ID', + `user_count` int NOT NULL DEFAULT '0' 
COMMENT 'User Count', + `node_count` int NOT NULL DEFAULT '0' COMMENT 'Node Count', + `user_data` TEXT COMMENT 'User data JSON (id and email/phone)', + `created_at` datetime(3) DEFAULT NULL COMMENT 'Create Time', + PRIMARY KEY (`id`), + KEY `idx_history_id` (`history_id`), + KEY `idx_node_group_id` (`node_group_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Group History Details'; + +-- ===== Add columns to nodes table ===== +SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'nodes' AND COLUMN_NAME = 'node_group_ids'); +SET @sql = IF(@column_exists = 0, + 'ALTER TABLE `nodes` ADD COLUMN `node_group_ids` JSON COMMENT ''Node Group IDs (JSON array, multiple groups)''', + 'SELECT ''Column node_group_ids already exists'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Add node_group_id column to user_subscribe table ===== +SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND COLUMN_NAME = 'node_group_id'); +SET @sql = IF(@column_exists = 0, + 'ALTER TABLE `user_subscribe` ADD COLUMN `node_group_id` bigint NOT NULL DEFAULT 0 COMMENT ''Node Group ID (single ID)''', + 'SELECT ''Column node_group_id already exists'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Add index for user_subscribe.node_group_id ===== +SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND INDEX_NAME = 'idx_node_group_id'); +SET @sql = IF(@index_exists = 0, + 'ALTER TABLE `user_subscribe` ADD INDEX `idx_node_group_id` (`node_group_id`)', + 'SELECT ''Index idx_node_group_id already exists'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Add group_locked column to user_subscribe table ===== +SET @column_exists = (SELECT COUNT(*) 
FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'user_subscribe' AND COLUMN_NAME = 'group_locked'); +SET @sql = IF(@column_exists = 0, + 'ALTER TABLE `user_subscribe` ADD COLUMN `group_locked` tinyint(1) NOT NULL DEFAULT 0 COMMENT ''Group Locked''', + 'SELECT ''Column group_locked already exists in user_subscribe table'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Add columns to subscribe table ===== +SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND COLUMN_NAME = 'node_group_ids'); +SET @sql = IF(@column_exists = 0, + 'ALTER TABLE `subscribe` ADD COLUMN `node_group_ids` JSON COMMENT ''Node Group IDs (JSON array, multiple groups)''', + 'SELECT ''Column node_group_ids already exists'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Add default node_group_id column to subscribe table ===== +SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND COLUMN_NAME = 'node_group_id'); +SET @sql = IF(@column_exists = 0, + 'ALTER TABLE `subscribe` ADD COLUMN `node_group_id` bigint NOT NULL DEFAULT 0 COMMENT ''Default Node Group ID (single ID)''', + 'SELECT ''Column node_group_id already exists in subscribe table'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Add index for subscribe.node_group_id ===== +SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'subscribe' AND INDEX_NAME = 'idx_node_group_id'); +SET @sql = IF(@index_exists = 0, + 'ALTER TABLE `subscribe` ADD INDEX `idx_node_group_id` (`node_group_id`)', + 'SELECT ''Index idx_node_group_id already exists in subscribe table'''); +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +-- ===== Insert system configuration entries ===== +INSERT 
INTO `system` (`category`, `key`, `value`, `desc`) VALUES + ('group', 'enabled', 'false', 'Group Management Enabled'), + ('group', 'mode', 'average', 'Group Mode: average/subscribe/traffic'), + ('group', 'auto_create_group', 'false', 'Auto-create user group when creating subscribe product') +ON DUPLICATE KEY UPDATE + `value` = VALUES(`value`), + `desc` = VALUES(`desc`); diff --git a/initialize/migrate/database/02143_update_verify_config.down.sql b/initialize/migrate/database/02143_update_verify_config.down.sql new file mode 100644 index 0000000..2c66df3 --- /dev/null +++ b/initialize/migrate/database/02143_update_verify_config.down.sql @@ -0,0 +1,17 @@ +-- Rollback: restore old verify configuration fields +INSERT INTO `system` (`category`, `key`, `value`, `type`, `desc`) VALUES + ('verify', 'EnableLoginVerify', 'false', 'bool', 'is enable login verify'), + ('verify', 'EnableRegisterVerify', 'false', 'bool', 'is enable register verify'), + ('verify', 'EnableResetPasswordVerify', 'false', 'bool', 'is enable reset password verify') +ON DUPLICATE KEY UPDATE + `value` = VALUES(`value`), + `desc` = VALUES(`desc`); + +-- Remove new captcha configuration fields +DELETE FROM `system` WHERE `category` = 'verify' AND `key` IN ( + 'CaptchaType', + 'EnableUserLoginCaptcha', + 'EnableUserRegisterCaptcha', + 'EnableAdminLoginCaptcha', + 'EnableUserResetPasswordCaptcha' +); diff --git a/initialize/migrate/database/02143_update_verify_config.up.sql b/initialize/migrate/database/02143_update_verify_config.up.sql new file mode 100644 index 0000000..b6d5137 --- /dev/null +++ b/initialize/migrate/database/02143_update_verify_config.up.sql @@ -0,0 +1,17 @@ +-- Add new captcha configuration fields +INSERT INTO `system` (`category`, `key`, `value`, `type`, `desc`) VALUES + ('verify', 'CaptchaType', 'local', 'string', 'Captcha type: local or turnstile'), + ('verify', 'EnableUserLoginCaptcha', 'false', 'bool', 'Enable captcha for user login'), + ('verify', 'EnableUserRegisterCaptcha', 
'false', 'bool', 'Enable captcha for user registration'), + ('verify', 'EnableAdminLoginCaptcha', 'false', 'bool', 'Enable captcha for admin login'), + ('verify', 'EnableUserResetPasswordCaptcha', 'false', 'bool', 'Enable captcha for user reset password') +ON DUPLICATE KEY UPDATE + `value` = VALUES(`value`), + `desc` = VALUES(`desc`); + +-- Remove old verify configuration fields +DELETE FROM `system` WHERE `category` = 'verify' AND `key` IN ( + 'EnableLoginVerify', + 'EnableRegisterVerify', + 'EnableResetPasswordVerify' +); diff --git a/initialize/migrate/database/02144_add_expired_node_group.down.sql b/initialize/migrate/database/02144_add_expired_node_group.down.sql new file mode 100644 index 0000000..aeacd0e --- /dev/null +++ b/initialize/migrate/database/02144_add_expired_node_group.down.sql @@ -0,0 +1,12 @@ +-- 回滚 user_subscribe 表的过期流量字段 +ALTER TABLE `user_subscribe` +DROP COLUMN `expired_upload`, +DROP COLUMN `expired_download`; + +-- 回滚 node_group 表的过期节点组字段 +ALTER TABLE `node_group` +DROP INDEX `idx_is_expired_group`, +DROP COLUMN `speed_limit`, +DROP COLUMN `max_traffic_gb_expired`, +DROP COLUMN `expired_days_limit`, +DROP COLUMN `is_expired_group`; diff --git a/initialize/migrate/database/02144_add_expired_node_group.up.sql b/initialize/migrate/database/02144_add_expired_node_group.up.sql new file mode 100644 index 0000000..283f9a5 --- /dev/null +++ b/initialize/migrate/database/02144_add_expired_node_group.up.sql @@ -0,0 +1,14 @@ +-- 为 node_group 表添加过期节点组相关字段 +ALTER TABLE `node_group` +ADD COLUMN `is_expired_group` tinyint(1) NOT NULL DEFAULT 0 COMMENT 'Is Expired Group: 0=normal, 1=expired group' AFTER `for_calculation`, +ADD COLUMN `expired_days_limit` int NOT NULL DEFAULT 7 COMMENT 'Expired days limit (days)' AFTER `is_expired_group`, +ADD COLUMN `max_traffic_gb_expired` bigint DEFAULT 0 COMMENT 'Max traffic for expired users (GB)' AFTER `expired_days_limit`, +ADD COLUMN `speed_limit` int NOT NULL DEFAULT 0 COMMENT 'Speed limit (KB/s)' AFTER 
`max_traffic_gb_expired`; + +-- 添加索引 +ALTER TABLE `node_group` ADD INDEX `idx_is_expired_group` (`is_expired_group`); + +-- 为 user_subscribe 表添加过期流量统计字段 +ALTER TABLE `user_subscribe` +ADD COLUMN `expired_download` bigint NOT NULL DEFAULT 0 COMMENT 'Expired period download traffic (bytes)' AFTER `upload`, +ADD COLUMN `expired_upload` bigint NOT NULL DEFAULT 0 COMMENT 'Expired period upload traffic (bytes)' AFTER `expired_download`; diff --git a/initialize/migrate/database/02145_subscribe_traffic_limit.down.sql b/initialize/migrate/database/02145_subscribe_traffic_limit.down.sql new file mode 100644 index 0000000..4fdf319 --- /dev/null +++ b/initialize/migrate/database/02145_subscribe_traffic_limit.down.sql @@ -0,0 +1,6 @@ +-- Purpose: Rollback traffic_limit rules from subscribe +-- Author: Claude Code +-- Date: 2026-03-12 + +-- ===== Remove traffic_limit column from subscribe table ===== +ALTER TABLE `subscribe` DROP COLUMN IF EXISTS `traffic_limit`; diff --git a/initialize/migrate/database/02145_subscribe_traffic_limit.up.sql b/initialize/migrate/database/02145_subscribe_traffic_limit.up.sql new file mode 100644 index 0000000..18a9f30 --- /dev/null +++ b/initialize/migrate/database/02145_subscribe_traffic_limit.up.sql @@ -0,0 +1,22 @@ +-- Purpose: Add traffic_limit rules to subscribe +-- Author: Claude Code +-- Date: 2026-03-12 + +-- ===== Add traffic_limit column to subscribe table ===== +SET @column_exists = ( + SELECT COUNT(*) + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = DATABASE() + AND TABLE_NAME = 'subscribe' + AND COLUMN_NAME = 'traffic_limit' +); + +SET @sql = IF( + @column_exists = 0, + 'ALTER TABLE `subscribe` ADD COLUMN `traffic_limit` TEXT NULL COMMENT ''Traffic Limit Rules (JSON)'' AFTER `node_group_id`', + 'SELECT ''Column traffic_limit already exists in subscribe table''' +); + +PREPARE stmt FROM @sql; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; diff --git a/internal/handler/admin/group/createNodeGroupHandler.go 
b/internal/handler/admin/group/createNodeGroupHandler.go new file mode 100644 index 0000000..eaba8cf --- /dev/null +++ b/internal/handler/admin/group/createNodeGroupHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Create node group +func CreateNodeGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.CreateNodeGroupRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewCreateNodeGroupLogic(c.Request.Context(), svcCtx) + err := l.CreateNodeGroup(&req) + result.HttpResult(c, nil, err) + } +} diff --git a/internal/handler/admin/group/deleteNodeGroupHandler.go b/internal/handler/admin/group/deleteNodeGroupHandler.go new file mode 100644 index 0000000..b93120d --- /dev/null +++ b/internal/handler/admin/group/deleteNodeGroupHandler.go @@ -0,0 +1,29 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Delete node group +func DeleteNodeGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.DeleteNodeGroupRequest + if err := c.ShouldBind(&req); err != nil { + result.ParamErrorResult(c, err) + return + } + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewDeleteNodeGroupLogic(c.Request.Context(), svcCtx) + err := l.DeleteNodeGroup(&req) + result.HttpResult(c, nil, err) + } +} diff --git 
a/internal/handler/admin/group/exportGroupResultHandler.go b/internal/handler/admin/group/exportGroupResultHandler.go new file mode 100644 index 0000000..69b065f --- /dev/null +++ b/internal/handler/admin/group/exportGroupResultHandler.go @@ -0,0 +1,36 @@ +package group + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Export group result +func ExportGroupResultHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.ExportGroupResultRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewExportGroupResultLogic(c.Request.Context(), svcCtx) + data, filename, err := l.ExportGroupResult(&req) + if err != nil { + result.HttpResult(c, nil, err) + return + } + + // 设置响应头 + c.Header("Content-Type", "text/csv") + c.Header("Content-Disposition", "attachment; filename="+filename) + c.Data(http.StatusOK, "text/csv", data) + } +} diff --git a/internal/handler/admin/group/getGroupConfigHandler.go b/internal/handler/admin/group/getGroupConfigHandler.go new file mode 100644 index 0000000..ef24311 --- /dev/null +++ b/internal/handler/admin/group/getGroupConfigHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Get group config +func GetGroupConfigHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.GetGroupConfigRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr 
!= nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewGetGroupConfigLogic(c.Request.Context(), svcCtx) + resp, err := l.GetGroupConfig(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/getGroupHistoryDetailHandler.go b/internal/handler/admin/group/getGroupHistoryDetailHandler.go new file mode 100644 index 0000000..fa58f3c --- /dev/null +++ b/internal/handler/admin/group/getGroupHistoryDetailHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Get group history detail +func GetGroupHistoryDetailHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.GetGroupHistoryDetailRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewGetGroupHistoryDetailLogic(c.Request.Context(), svcCtx) + resp, err := l.GetGroupHistoryDetail(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/getGroupHistoryHandler.go b/internal/handler/admin/group/getGroupHistoryHandler.go new file mode 100644 index 0000000..b6b5490 --- /dev/null +++ b/internal/handler/admin/group/getGroupHistoryHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Get group history +func GetGroupHistoryHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.GetGroupHistoryRequest + _ = c.ShouldBind(&req) + 
validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewGetGroupHistoryLogic(c.Request.Context(), svcCtx) + resp, err := l.GetGroupHistory(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/getNodeGroupListHandler.go b/internal/handler/admin/group/getNodeGroupListHandler.go new file mode 100644 index 0000000..501138f --- /dev/null +++ b/internal/handler/admin/group/getNodeGroupListHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Get node group list +func GetNodeGroupListHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.GetNodeGroupListRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewGetNodeGroupListLogic(c.Request.Context(), svcCtx) + resp, err := l.GetNodeGroupList(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/getRecalculationStatusHandler.go b/internal/handler/admin/group/getRecalculationStatusHandler.go new file mode 100644 index 0000000..e9b76b8 --- /dev/null +++ b/internal/handler/admin/group/getRecalculationStatusHandler.go @@ -0,0 +1,18 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/pkg/result" +) + +// Get recalculation status +func GetRecalculationStatusHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + + l := group.NewGetRecalculationStatusLogic(c.Request.Context(), svcCtx) + 
resp, err := l.GetRecalculationStatus() + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/getSubscribeGroupMappingHandler.go b/internal/handler/admin/group/getSubscribeGroupMappingHandler.go new file mode 100644 index 0000000..4da798c --- /dev/null +++ b/internal/handler/admin/group/getSubscribeGroupMappingHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Get subscribe group mapping +func GetSubscribeGroupMappingHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.GetSubscribeGroupMappingRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewGetSubscribeGroupMappingLogic(c.Request.Context(), svcCtx) + resp, err := l.GetSubscribeGroupMapping(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/previewUserNodesHandler.go b/internal/handler/admin/group/previewUserNodesHandler.go new file mode 100644 index 0000000..da3560d --- /dev/null +++ b/internal/handler/admin/group/previewUserNodesHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Preview user nodes +func PreviewUserNodesHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.PreviewUserNodesRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + 
return + } + + l := group.NewPreviewUserNodesLogic(c.Request.Context(), svcCtx) + resp, err := l.PreviewUserNodes(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/admin/group/recalculateGroupHandler.go b/internal/handler/admin/group/recalculateGroupHandler.go new file mode 100644 index 0000000..848363d --- /dev/null +++ b/internal/handler/admin/group/recalculateGroupHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Recalculate group +func RecalculateGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.RecalculateGroupRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewRecalculateGroupLogic(c.Request.Context(), svcCtx) + err := l.RecalculateGroup(&req) + result.HttpResult(c, nil, err) + } +} diff --git a/internal/handler/admin/group/resetGroupsHandler.go b/internal/handler/admin/group/resetGroupsHandler.go new file mode 100644 index 0000000..e0af912 --- /dev/null +++ b/internal/handler/admin/group/resetGroupsHandler.go @@ -0,0 +1,17 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/pkg/result" +) + +// Reset all groups +func ResetGroupsHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + l := group.NewResetGroupsLogic(c.Request.Context(), svcCtx) + err := l.ResetGroups() + result.HttpResult(c, nil, err) + } +} diff --git a/internal/handler/admin/group/updateGroupConfigHandler.go 
b/internal/handler/admin/group/updateGroupConfigHandler.go new file mode 100644 index 0000000..6f2ea1c --- /dev/null +++ b/internal/handler/admin/group/updateGroupConfigHandler.go @@ -0,0 +1,26 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Update group config +func UpdateGroupConfigHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.UpdateGroupConfigRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := group.NewUpdateGroupConfigLogic(c.Request.Context(), svcCtx) + err := l.UpdateGroupConfig(&req) + result.HttpResult(c, nil, err) + } +} diff --git a/internal/handler/admin/group/updateNodeGroupHandler.go b/internal/handler/admin/group/updateNodeGroupHandler.go new file mode 100644 index 0000000..e9f9058 --- /dev/null +++ b/internal/handler/admin/group/updateNodeGroupHandler.go @@ -0,0 +1,33 @@ +package group + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Update node group +func UpdateNodeGroupHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.UpdateNodeGroupRequest + if err := c.ShouldBindUri(&req); err != nil { + result.ParamErrorResult(c, err) + return + } + if err := c.ShouldBind(&req); err != nil { + result.ParamErrorResult(c, err) + return + } + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := 
group.NewUpdateNodeGroupLogic(c.Request.Context(), svcCtx) + err := l.UpdateNodeGroup(&req) + result.HttpResult(c, nil, err) + } +} diff --git a/internal/handler/auth/admin/adminGenerateCaptchaHandler.go b/internal/handler/auth/admin/adminGenerateCaptchaHandler.go new file mode 100644 index 0000000..caabd45 --- /dev/null +++ b/internal/handler/auth/admin/adminGenerateCaptchaHandler.go @@ -0,0 +1,18 @@ +package admin + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/auth/admin" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/pkg/result" +) + +// Generate captcha +func AdminGenerateCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + + l := admin.NewAdminGenerateCaptchaLogic(c.Request.Context(), svcCtx) + resp, err := l.AdminGenerateCaptcha() + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/auth/admin/adminLoginHandler.go b/internal/handler/auth/admin/adminLoginHandler.go new file mode 100644 index 0000000..95239bd --- /dev/null +++ b/internal/handler/auth/admin/adminLoginHandler.go @@ -0,0 +1,30 @@ +package admin + +import ( + "github.com/gin-gonic/gin" + adminLogic "github.com/perfect-panel/server/internal/logic/auth/admin" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Admin login +func AdminLoginHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.UserLoginRequest + _ = c.ShouldBind(&req) + // get client ip + req.IP = c.ClientIP() + req.UserAgent = c.Request.UserAgent() + + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := adminLogic.NewAdminLoginLogic(c.Request.Context(), svcCtx) + resp, err := l.AdminLogin(&req) + result.HttpResult(c, resp, err) + } +} diff --git 
a/internal/handler/auth/admin/adminResetPasswordHandler.go b/internal/handler/auth/admin/adminResetPasswordHandler.go new file mode 100644 index 0000000..9fb909c --- /dev/null +++ b/internal/handler/auth/admin/adminResetPasswordHandler.go @@ -0,0 +1,29 @@ +package admin + +import ( + "github.com/gin-gonic/gin" + adminLogic "github.com/perfect-panel/server/internal/logic/auth/admin" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Admin reset password +func AdminResetPasswordHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.ResetPasswordRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + // get client ip + req.IP = c.ClientIP() + req.UserAgent = c.Request.UserAgent() + + l := adminLogic.NewAdminResetPasswordLogic(c.Request.Context(), svcCtx) + resp, err := l.AdminResetPassword(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/auth/admin/adminSliderVerifyCaptchaHandler.go b/internal/handler/auth/admin/adminSliderVerifyCaptchaHandler.go new file mode 100644 index 0000000..59f2f69 --- /dev/null +++ b/internal/handler/auth/admin/adminSliderVerifyCaptchaHandler.go @@ -0,0 +1,26 @@ +package admin + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/auth/admin" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Verify slider captcha +func AdminSliderVerifyCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.SliderVerifyCaptchaRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := 
admin.NewAdminSliderVerifyCaptchaLogic(c.Request.Context(), svcCtx) + resp, err := l.AdminSliderVerifyCaptcha(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/auth/generateCaptchaHandler.go b/internal/handler/auth/generateCaptchaHandler.go new file mode 100644 index 0000000..d263da7 --- /dev/null +++ b/internal/handler/auth/generateCaptchaHandler.go @@ -0,0 +1,18 @@ +package auth + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/auth" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/pkg/result" +) + +// Generate captcha +func GenerateCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + + l := auth.NewGenerateCaptchaLogic(c.Request.Context(), svcCtx) + resp, err := l.GenerateCaptcha() + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/auth/resetPasswordHandler.go b/internal/handler/auth/resetPasswordHandler.go index d4edc9b..8de4ca6 100644 --- a/internal/handler/auth/resetPasswordHandler.go +++ b/internal/handler/auth/resetPasswordHandler.go @@ -1,16 +1,11 @@ package auth import ( - "time" - "github.com/gin-gonic/gin" "github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/result" - "github.com/perfect-panel/server/pkg/turnstile" - "github.com/perfect-panel/server/pkg/xerr" - "github.com/pkg/errors" ) // Reset password @@ -25,17 +20,8 @@ func ResetPasswordHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { } // get client ip req.IP = c.ClientIP() - if svcCtx.Config.Verify.ResetPasswordVerify { - verifyTurns := turnstile.New(turnstile.Config{ - Secret: svcCtx.Config.Verify.TurnstileSecret, - Timeout: 3 * time.Second, - }) - if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify { - err = 
errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify) - result.HttpResult(c, nil, err) - return - } - } + req.UserAgent = c.Request.UserAgent() + l := auth.NewResetPasswordLogic(c.Request.Context(), svcCtx) resp, err := l.ResetPassword(&req) result.HttpResult(c, resp, err) diff --git a/internal/handler/auth/sliderVerifyCaptchaHandler.go b/internal/handler/auth/sliderVerifyCaptchaHandler.go new file mode 100644 index 0000000..c873623 --- /dev/null +++ b/internal/handler/auth/sliderVerifyCaptchaHandler.go @@ -0,0 +1,26 @@ +package auth + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/auth" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Verify slider captcha +func SliderVerifyCaptchaHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.SliderVerifyCaptchaRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := auth.NewSliderVerifyCaptchaLogic(c.Request.Context(), svcCtx) + resp, err := l.SliderVerifyCaptcha(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/auth/telephoneRegisterHandler.go b/internal/handler/auth/telephoneRegisterHandler.go new file mode 100644 index 0000000..7e4f355 --- /dev/null +++ b/internal/handler/auth/telephoneRegisterHandler.go @@ -0,0 +1,26 @@ +package auth + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/auth" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// User Telephone register +func TelephoneRegisterHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req 
types.TelephoneRegisterRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + return + } + + l := auth.NewTelephoneRegisterLogic(c.Request.Context(), svcCtx) + resp, err := l.TelephoneRegister(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/auth/telephoneResetPasswordHandler.go b/internal/handler/auth/telephoneResetPasswordHandler.go index 16a5105..bc0f8a6 100644 --- a/internal/handler/auth/telephoneResetPasswordHandler.go +++ b/internal/handler/auth/telephoneResetPasswordHandler.go @@ -1,14 +1,13 @@ package auth import ( - "time" - "github.com/gin-gonic/gin" "github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" "github.com/perfect-panel/server/pkg/result" - "github.com/perfect-panel/server/pkg/turnstile" + "github.com/perfect-panel/server/pkg/tool" "github.com/perfect-panel/server/pkg/xerr" "github.com/pkg/errors" ) @@ -25,17 +24,44 @@ func TelephoneResetPasswordHandler(svcCtx *svc.ServiceContext) func(c *gin.Conte } // get client ip req.IP = c.ClientIP() - if svcCtx.Config.Verify.ResetPasswordVerify { - verifyTurns := turnstile.New(turnstile.Config{ - Secret: svcCtx.Config.Verify.TurnstileSecret, - Timeout: 3 * time.Second, + + // Get verify config from database + verifyCfg, err := svcCtx.SystemModel.GetVerifyConfig(c.Request.Context()) + if err != nil { + result.HttpResult(c, nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "get verify config failed: %v", err)) + return + } + + var config struct { + CaptchaType string `json:"captcha_type"` + EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &config) + + // Verify captcha if enabled + if 
config.EnableUserResetPasswordCaptcha { + captchaService := captcha.NewService(captcha.Config{ + Type: captcha.CaptchaType(config.CaptchaType), + TurnstileSecret: config.TurnstileSecret, + RedisClient: svcCtx.Redis, }) - if verify, err := verifyTurns.Verify(c.Request.Context(), req.CfToken, req.IP); err != nil || !verify { - err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify) - result.HttpResult(c, nil, err) + + var token, code string + if config.CaptchaType == "turnstile" { + token = req.CfToken + } else { + token = req.CaptchaId + code = req.CaptchaCode + } + + verified, err := captchaService.Verify(c.Request.Context(), token, code, req.IP) + if err != nil || !verified { + result.HttpResult(c, nil, errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "captcha verification failed: %v", err)) return } } + l := auth.NewTelephoneResetPasswordLogic(c, svcCtx) resp, err := l.TelephoneResetPassword(&req) result.HttpResult(c, resp, err) diff --git a/internal/handler/auth/telephoneUserRegisterHandler.go b/internal/handler/auth/telephoneUserRegisterHandler.go index 45a7ba8..306777d 100644 --- a/internal/handler/auth/telephoneUserRegisterHandler.go +++ b/internal/handler/auth/telephoneUserRegisterHandler.go @@ -1,16 +1,11 @@ package auth import ( - "time" - "github.com/gin-gonic/gin" "github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/result" - "github.com/perfect-panel/server/pkg/turnstile" - "github.com/perfect-panel/server/pkg/xerr" - "github.com/pkg/errors" ) // User Telephone register @@ -26,17 +21,7 @@ func TelephoneUserRegisterHandler(svcCtx *svc.ServiceContext) func(c *gin.Contex // get client ip req.IP = c.ClientIP() req.UserAgent = c.Request.UserAgent() - if svcCtx.Config.Verify.RegisterVerify { - verifyTurns := turnstile.New(turnstile.Config{ - Secret: 
svcCtx.Config.Verify.TurnstileSecret, - Timeout: 3 * time.Second, - }) - if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify { - err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify) - result.HttpResult(c, nil, err) - return - } - } + l := auth.NewTelephoneUserRegisterLogic(c.Request.Context(), svcCtx) resp, err := l.TelephoneUserRegister(&req) result.HttpResult(c, resp, err) diff --git a/internal/handler/auth/userLoginHandler.go b/internal/handler/auth/userLoginHandler.go index 20eff59..a188876 100644 --- a/internal/handler/auth/userLoginHandler.go +++ b/internal/handler/auth/userLoginHandler.go @@ -1,16 +1,11 @@ package auth import ( - "time" - "github.com/gin-gonic/gin" "github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/result" - "github.com/perfect-panel/server/pkg/turnstile" - "github.com/perfect-panel/server/pkg/xerr" - "github.com/pkg/errors" ) // User login @@ -21,17 +16,7 @@ func UserLoginHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { // get client ip req.IP = c.ClientIP() req.UserAgent = c.Request.UserAgent() - if svcCtx.Config.Verify.LoginVerify && !svcCtx.Config.Debug { - verifyTurns := turnstile.New(turnstile.Config{ - Secret: svcCtx.Config.Verify.TurnstileSecret, - Timeout: 3 * time.Second, - }) - if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify { - err = errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "error: %v, verify: %v", err, verify) - result.HttpResult(c, nil, err) - return - } - } + validateErr := svcCtx.Validate(&req) if validateErr != nil { result.ParamErrorResult(c, validateErr) diff --git a/internal/handler/auth/userRegisterHandler.go b/internal/handler/auth/userRegisterHandler.go index ea40223..a11c743 100644 --- a/internal/handler/auth/userRegisterHandler.go +++ 
b/internal/handler/auth/userRegisterHandler.go @@ -1,16 +1,11 @@ package auth import ( - "time" - "github.com/gin-gonic/gin" "github.com/perfect-panel/server/internal/logic/auth" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/result" - "github.com/perfect-panel/server/pkg/turnstile" - "github.com/perfect-panel/server/pkg/xerr" - "github.com/pkg/errors" ) // User register @@ -21,16 +16,7 @@ func UserRegisterHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { // get client ip req.IP = c.ClientIP() req.UserAgent = c.Request.UserAgent() - if svcCtx.Config.Verify.RegisterVerify { - verifyTurns := turnstile.New(turnstile.Config{ - Secret: svcCtx.Config.Verify.TurnstileSecret, - Timeout: 3 * time.Second, - }) - if verify, err := verifyTurns.Verify(c, req.CfToken, req.IP); err != nil || !verify { - result.HttpResult(c, nil, errors.Wrapf(xerr.NewErrCode(xerr.TooManyRequests), "verify error: %v", err.Error())) - return - } - } + validateErr := svcCtx.Validate(&req) if validateErr != nil { result.ParamErrorResult(c, validateErr) diff --git a/internal/handler/public/user/getUserTrafficStatsHandler.go b/internal/handler/public/user/getUserTrafficStatsHandler.go new file mode 100644 index 0000000..5c3c7dd --- /dev/null +++ b/internal/handler/public/user/getUserTrafficStatsHandler.go @@ -0,0 +1,26 @@ +package user + +import ( + "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/logic/public/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/result" +) + +// Get User Traffic Statistics +func GetUserTrafficStatsHandler(svcCtx *svc.ServiceContext) func(c *gin.Context) { + return func(c *gin.Context) { + var req types.GetUserTrafficStatsRequest + _ = c.ShouldBind(&req) + validateErr := svcCtx.Validate(&req) + if validateErr != nil { + result.ParamErrorResult(c, validateErr) + 
return + } + + l := user.NewGetUserTrafficStatsLogic(c.Request.Context(), svcCtx) + resp, err := l.GetUserTrafficStats(&req) + result.HttpResult(c, resp, err) + } +} diff --git a/internal/handler/routes.go b/internal/handler/routes.go index 23bed10..e82d219 100644 --- a/internal/handler/routes.go +++ b/internal/handler/routes.go @@ -12,6 +12,7 @@ import ( adminConsole "github.com/perfect-panel/server/internal/handler/admin/console" adminCoupon "github.com/perfect-panel/server/internal/handler/admin/coupon" adminDocument "github.com/perfect-panel/server/internal/handler/admin/document" + adminGroup "github.com/perfect-panel/server/internal/handler/admin/group" adminLog "github.com/perfect-panel/server/internal/handler/admin/log" adminMarketing "github.com/perfect-panel/server/internal/handler/admin/marketing" adminOrder "github.com/perfect-panel/server/internal/handler/admin/order" @@ -24,6 +25,7 @@ import ( adminTool "github.com/perfect-panel/server/internal/handler/admin/tool" adminUser "github.com/perfect-panel/server/internal/handler/admin/user" auth "github.com/perfect-panel/server/internal/handler/auth" + authAdmin "github.com/perfect-panel/server/internal/handler/auth/admin" authOauth "github.com/perfect-panel/server/internal/handler/auth/oauth" common "github.com/perfect-panel/server/internal/handler/common" publicAnnouncement "github.com/perfect-panel/server/internal/handler/public/announcement" @@ -189,6 +191,53 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { adminDocumentGroupRouter.GET("/list", adminDocument.GetDocumentListHandler(serverCtx)) } + adminGroupGroupRouter := router.Group("/v1/admin/group") + adminGroupGroupRouter.Use(middleware.AuthMiddleware(serverCtx)) + + { + // Get group config + adminGroupGroupRouter.GET("/config", adminGroup.GetGroupConfigHandler(serverCtx)) + + // Update group config + adminGroupGroupRouter.PUT("/config", adminGroup.UpdateGroupConfigHandler(serverCtx)) + + // Export group result + 
adminGroupGroupRouter.GET("/export", adminGroup.ExportGroupResultHandler(serverCtx)) + + // Get group history + adminGroupGroupRouter.GET("/history", adminGroup.GetGroupHistoryHandler(serverCtx)) + + // Get group history detail + adminGroupGroupRouter.GET("/history/detail", adminGroup.GetGroupHistoryDetailHandler(serverCtx)) + + // Create node group + adminGroupGroupRouter.POST("/node", adminGroup.CreateNodeGroupHandler(serverCtx)) + + // Update node group + adminGroupGroupRouter.PUT("/node", adminGroup.UpdateNodeGroupHandler(serverCtx)) + + // Delete node group + adminGroupGroupRouter.DELETE("/node", adminGroup.DeleteNodeGroupHandler(serverCtx)) + + // Get node group list + adminGroupGroupRouter.GET("/node/list", adminGroup.GetNodeGroupListHandler(serverCtx)) + + // Preview user nodes + adminGroupGroupRouter.GET("/preview", adminGroup.PreviewUserNodesHandler(serverCtx)) + + // Recalculate group + adminGroupGroupRouter.POST("/recalculate", adminGroup.RecalculateGroupHandler(serverCtx)) + + // Get recalculation status + adminGroupGroupRouter.GET("/recalculation/status", adminGroup.GetRecalculationStatusHandler(serverCtx)) + + // Reset all groups + adminGroupGroupRouter.POST("/reset", adminGroup.ResetGroupsHandler(serverCtx)) + + // Get subscribe group mapping + adminGroupGroupRouter.GET("/subscribe/mapping", adminGroup.GetSubscribeGroupMappingHandler(serverCtx)) + } + adminLogGroupRouter := router.Group("/v1/admin/log") adminLogGroupRouter.Use(middleware.AuthMiddleware(serverCtx)) @@ -272,6 +321,9 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { // Query quota task pre-count adminMarketingGroupRouter.POST("/quota/pre-count", adminMarketing.QueryQuotaTaskPreCountHandler(serverCtx)) + + // Query quota task status + adminMarketingGroupRouter.POST("/quota/status", adminMarketing.QueryQuotaTaskStatusHandler(serverCtx)) } adminOrderGroupRouter := router.Group("/v1/admin/order") @@ -659,6 +711,12 @@ func RegisterHandlers(router *gin.Engine, 
serverCtx *svc.ServiceContext) { authGroupRouter.Use(middleware.DeviceMiddleware(serverCtx)) { + // Generate captcha + authGroupRouter.POST("/captcha/generate", auth.GenerateCaptchaHandler(serverCtx)) + + // Verify slider captcha + authGroupRouter.POST("/captcha/slider/verify", auth.SliderVerifyCaptchaHandler(serverCtx)) + // Check user is exist authGroupRouter.GET("/check", auth.CheckUserHandler(serverCtx)) @@ -681,15 +739,32 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { authGroupRouter.POST("/register", auth.UserRegisterHandler(serverCtx)) // User Telephone register - authGroupRouter.POST("/register/telephone", auth.TelephoneUserRegisterHandler(serverCtx)) + authGroupRouter.POST("/register/telephone", auth.TelephoneRegisterHandler(serverCtx)) // Reset password authGroupRouter.POST("/reset", auth.ResetPasswordHandler(serverCtx)) - // Reset password + // Reset password by telephone authGroupRouter.POST("/reset/telephone", auth.TelephoneResetPasswordHandler(serverCtx)) } + authAdminGroupRouter := router.Group("/v1/auth/admin") + authAdminGroupRouter.Use(middleware.DeviceMiddleware(serverCtx)) + + { + // Generate captcha + authAdminGroupRouter.POST("/captcha/generate", authAdmin.AdminGenerateCaptchaHandler(serverCtx)) + + // Verify slider captcha + authAdminGroupRouter.POST("/captcha/slider/verify", authAdmin.AdminSliderVerifyCaptchaHandler(serverCtx)) + + // Admin login + authAdminGroupRouter.POST("/login", authAdmin.AdminLoginHandler(serverCtx)) + + // Admin reset password + authAdminGroupRouter.POST("/reset", authAdmin.AdminResetPasswordHandler(serverCtx)) + } + authOauthGroupRouter := router.Group("/v1/auth/oauth") { @@ -742,6 +817,15 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { // Submit contact info commonGroupRouter.POST("/contact", common.SubmitContactHandler(serverCtx)) + + // Report log message + commonGroupRouter.POST("/log/report", common.ReportLogMessageHandler(serverCtx)) + + // Check 
verification code (legacy v1) + commonGroupRouter.POST("/check_code", auth.CheckCodeLegacyV1Handler(serverCtx)) + + // Check verification code (legacy v2, consume code) + commonGroupRouter.POST("/check_code/v2", auth.CheckCodeLegacyV2Handler(serverCtx)) } publicAnnouncementGroupRouter := router.Group("/v1/public/announcement") @@ -857,6 +941,9 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { // Get user subscribe node info publicSubscribeGroupRouter.GET("/node/list", publicSubscribe.QueryUserSubscribeNodeListHandler(serverCtx)) + + // Get subscribe group list + publicSubscribeGroupRouter.GET("/group/list", publicSubscribe.QuerySubscribeGroupListHandler(serverCtx)) } publicTicketGroupRouter := router.Group("/v1/public/ticket") @@ -891,11 +978,11 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { // Get Agent Downloads publicUserGroupRouter.GET("/agent_downloads", publicUser.GetAgentDownloadsHandler(serverCtx)) - publicUserGroupRouter.GET("/agent/downloads", publicUser.GetAgentDownloadsHandler(serverCtx)) + publicUserGroupRouter.GET("/agent/downloads", publicUser.GetAgentDownloadsHandler(serverCtx)) // alias: backward-compat // Get Agent Realtime publicUserGroupRouter.GET("/agent_realtime", publicUser.GetAgentRealtimeHandler(serverCtx)) - publicUserGroupRouter.GET("/agent/realtime", publicUser.GetAgentRealtimeHandler(serverCtx)) + publicUserGroupRouter.GET("/agent/realtime", publicUser.GetAgentRealtimeHandler(serverCtx)) // alias: backward-compat // Query User Balance Log publicUserGroupRouter.GET("/balance_log", publicUser.QueryUserBalanceLogHandler(serverCtx)) @@ -944,11 +1031,11 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { // Get Invite Sales publicUserGroupRouter.GET("/invite_sales", publicUser.GetInviteSalesHandler(serverCtx)) - publicUserGroupRouter.GET("/invite/sales", publicUser.GetInviteSalesHandler(serverCtx)) + publicUserGroupRouter.GET("/invite/sales", 
publicUser.GetInviteSalesHandler(serverCtx)) // alias: backward-compat // Get User Invite Stats publicUserGroupRouter.GET("/invite_stats", publicUser.GetUserInviteStatsHandler(serverCtx)) - publicUserGroupRouter.GET("/invite/stats", publicUser.GetUserInviteStatsHandler(serverCtx)) + publicUserGroupRouter.GET("/invite/stats", publicUser.GetUserInviteStatsHandler(serverCtx)) // alias: backward-compat // Get Login Log publicUserGroupRouter.GET("/login_log", publicUser.GetLoginLogHandler(serverCtx)) @@ -980,6 +1067,9 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { // Reset User Subscribe Token publicUserGroupRouter.PUT("/subscribe_token", publicUser.ResetUserSubscribeTokenHandler(serverCtx)) + // Get User Traffic Statistics + publicUserGroupRouter.GET("/traffic_stats", publicUser.GetUserTrafficStatsHandler(serverCtx)) + // Unbind Device publicUserGroupRouter.PUT("/unbind_device", publicUser.UnbindDeviceHandler(serverCtx)) @@ -1030,10 +1120,10 @@ func RegisterHandlers(router *gin.Engine, serverCtx *svc.ServiceContext) { serverGroupRouter.GET("/user", server.GetServerUserListHandler(serverCtx)) } - serverV2GroupRouter := router.Group("/v2/server") + serverGroupRouterV2 := router.Group("/v2/server") { // Get Server Protocol Config - serverV2GroupRouter.GET("/:server_id", server.QueryServerProtocolConfigHandler(serverCtx)) + serverGroupRouterV2.GET("/:server_id", server.QueryServerProtocolConfigHandler(serverCtx)) } } diff --git a/internal/logic/admin/group/createNodeGroupLogic.go b/internal/logic/admin/group/createNodeGroupLogic.go new file mode 100644 index 0000000..9e68c10 --- /dev/null +++ b/internal/logic/admin/group/createNodeGroupLogic.go @@ -0,0 +1,81 @@ +package group + +import ( + "context" + "errors" + "time" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" +) + +type 
CreateNodeGroupLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewCreateNodeGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CreateNodeGroupLogic { + return &CreateNodeGroupLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *CreateNodeGroupLogic) CreateNodeGroup(req *types.CreateNodeGroupRequest) error { + // 验证:系统中只能有一个过期节点组 + if req.IsExpiredGroup != nil && *req.IsExpiredGroup { + var count int64 + err := l.svcCtx.DB.Model(&group.NodeGroup{}). + Where("is_expired_group = ?", true). + Count(&count).Error + if err != nil { + logger.Errorf("failed to check expired group count: %v", err) + return err + } + if count > 0 { + return errors.New("system already has an expired node group, cannot create multiple") + } + } + + // 创建节点组 + nodeGroup := &group.NodeGroup{ + Name: req.Name, + Description: req.Description, + Sort: req.Sort, + ForCalculation: req.ForCalculation, + IsExpiredGroup: req.IsExpiredGroup, + MaxTrafficGBExpired: req.MaxTrafficGBExpired, + MinTrafficGB: req.MinTrafficGB, + MaxTrafficGB: req.MaxTrafficGB, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + // 设置过期节点组的默认值 + if req.IsExpiredGroup != nil && *req.IsExpiredGroup { + // 过期节点组不参与分组计算 + falseValue := false + nodeGroup.ForCalculation = &falseValue + + if req.ExpiredDaysLimit != nil { + nodeGroup.ExpiredDaysLimit = *req.ExpiredDaysLimit + } else { + nodeGroup.ExpiredDaysLimit = 7 // 默认7天 + } + if req.SpeedLimit != nil { + nodeGroup.SpeedLimit = *req.SpeedLimit + } + } + + if err := l.svcCtx.DB.Create(nodeGroup).Error; err != nil { + logger.Errorf("failed to create node group: %v", err) + return err + } + + logger.Infof("created node group: node_group_id=%d", nodeGroup.Id) + return nil +} diff --git a/internal/logic/admin/group/deleteNodeGroupLogic.go b/internal/logic/admin/group/deleteNodeGroupLogic.go new file mode 100644 index 0000000..16c89d4 --- /dev/null +++ 
b/internal/logic/admin/group/deleteNodeGroupLogic.go @@ -0,0 +1,62 @@ +package group + +import ( + "context" + "errors" + "fmt" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/node" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "gorm.io/gorm" +) + +type DeleteNodeGroupLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewDeleteNodeGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteNodeGroupLogic { + return &DeleteNodeGroupLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *DeleteNodeGroupLogic) DeleteNodeGroup(req *types.DeleteNodeGroupRequest) error { + // 查询节点组信息 + var nodeGroup group.NodeGroup + if err := l.svcCtx.DB.Where("id = ?", req.Id).First(&nodeGroup).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return errors.New("node group not found") + } + logger.Errorf("failed to find node group: %v", err) + return err + } + + // 检查是否有关联节点(使用JSON_CONTAINS查询node_group_ids数组) + var nodeCount int64 + if err := l.svcCtx.DB.Model(&node.Node{}).Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", nodeGroup.Id)).Count(&nodeCount).Error; err != nil { + logger.Errorf("failed to count nodes in group: %v", err) + return err + } + if nodeCount > 0 { + return fmt.Errorf("cannot delete group with %d associated nodes, please migrate nodes first", nodeCount) + } + + // 使用 GORM Transaction 删除节点组 + return l.svcCtx.DB.Transaction(func(tx *gorm.DB) error { + // 删除节点组 + if err := tx.Where("id = ?", req.Id).Delete(&group.NodeGroup{}).Error; err != nil { + logger.Errorf("failed to delete node group: %v", err) + return err // 自动回滚 + } + + logger.Infof("deleted node group: id=%d", nodeGroup.Id) + return nil // 自动提交 + }) +} diff --git a/internal/logic/admin/group/exportGroupResultLogic.go 
b/internal/logic/admin/group/exportGroupResultLogic.go new file mode 100644 index 0000000..a84befa --- /dev/null +++ b/internal/logic/admin/group/exportGroupResultLogic.go @@ -0,0 +1,129 @@ +package group + +import ( + "bytes" + "context" + "encoding/csv" + "fmt" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" +) + +type ExportGroupResultLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewExportGroupResultLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ExportGroupResultLogic { + return &ExportGroupResultLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +// ExportGroupResult 导出分组结果为 CSV +// 返回:CSV 数据(字节切片)、文件名、错误 +func (l *ExportGroupResultLogic) ExportGroupResult(req *types.ExportGroupResultRequest) ([]byte, string, error) { + var records [][]string + + // CSV 表头 + records = append(records, []string{"用户ID", "节点组ID", "节点组名称"}) + + if req.HistoryId != nil { + // 导出指定历史的详细结果 + // 1. 查询分组历史详情 + var details []group.GroupHistoryDetail + if err := l.svcCtx.DB.Where("history_id = ?", *req.HistoryId).Find(&details).Error; err != nil { + logger.Errorf("failed to get group history details: %v", err) + return nil, "", err + } + + // 2. 
为每个组生成记录 + for _, detail := range details { + // 从 UserData JSON 解析用户信息 + type UserInfo struct { + Id int64 `json:"id"` + Email string `json:"email"` + } + var users []UserInfo + if err := l.svcCtx.DB.Raw("SELECT * FROM JSON_ARRAY(?)", detail.UserData).Scan(&users).Error; err != nil { + // 如果解析失败,尝试用标准 JSON 解析 + logger.Errorf("failed to parse user data: %v", err) + continue + } + + // 查询节点组名称 + var nodeGroup group.NodeGroup + l.svcCtx.DB.Where("id = ?", detail.NodeGroupId).First(&nodeGroup) + + // 为每个用户生成记录 + for _, user := range users { + records = append(records, []string{ + fmt.Sprintf("%d", user.Id), + fmt.Sprintf("%d", nodeGroup.Id), + nodeGroup.Name, + }) + } + } + } else { + // 导出当前所有用户的分组情况 + type UserNodeGroupInfo struct { + Id int64 `json:"id"` + NodeGroupId int64 `json:"node_group_id"` + } + var userSubscribes []UserNodeGroupInfo + if err := l.svcCtx.DB.Model(&user.Subscribe{}). + Select("DISTINCT user_id as id, node_group_id"). + Where("node_group_id > ?", 0). + Find(&userSubscribes).Error; err != nil { + logger.Errorf("failed to get users: %v", err) + return nil, "", err + } + + // 为每个用户生成记录 + for _, us := range userSubscribes { + // 查询节点组信息 + var nodeGroup group.NodeGroup + if err := l.svcCtx.DB.Where("id = ?", us.NodeGroupId).First(&nodeGroup).Error; err != nil { + logger.Errorf("failed to find node group: %v", err) + // 跳过该用户 + continue + } + + records = append(records, []string{ + fmt.Sprintf("%d", us.Id), + fmt.Sprintf("%d", nodeGroup.Id), + nodeGroup.Name, + }) + } + } + + // 生成 CSV 数据 + var buf bytes.Buffer + writer := csv.NewWriter(&buf) + writer.WriteAll(records) + writer.Flush() + + if err := writer.Error(); err != nil { + logger.Errorf("failed to write csv: %v", err) + return nil, "", err + } + + // 添加 UTF-8 BOM + bom := []byte{0xEF, 0xBB, 0xBF} + csvData := buf.Bytes() + result := make([]byte, 0, len(bom)+len(csvData)) + result = append(result, bom...) + result = append(result, csvData...) 
+ + // 生成文件名 + filename := fmt.Sprintf("group_result_%d.csv", req.HistoryId) + + return result, filename, nil +} diff --git a/internal/logic/admin/group/getGroupConfigLogic.go b/internal/logic/admin/group/getGroupConfigLogic.go new file mode 100644 index 0000000..2aedb10 --- /dev/null +++ b/internal/logic/admin/group/getGroupConfigLogic.go @@ -0,0 +1,125 @@ +package group + +import ( + "context" + "encoding/json" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/system" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type GetGroupConfigLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Get group config +func NewGetGroupConfigLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetGroupConfigLogic { + return &GetGroupConfigLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GetGroupConfigLogic) GetGroupConfig(req *types.GetGroupConfigRequest) (resp *types.GetGroupConfigResponse, err error) { + // 读取基础配置 + var enabledConfig system.System + var modeConfig system.System + var averageConfig system.System + var subscribeConfig system.System + var trafficConfig system.System + + // 从 system_config 表读取配置 + if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "enabled").First(&enabledConfig).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + l.Errorw("failed to get group enabled config", logger.Field("error", err.Error())) + return nil, err + } + + if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "mode").First(&modeConfig).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + l.Errorw("failed to get group mode config", logger.Field("error", err.Error())) + return nil, err + } + + // 读取 JSON 配置 + config := make(map[string]interface{}) + 
+ if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "average_config").First(&averageConfig).Error; err == nil { + var averageCfg map[string]interface{} + if err := json.Unmarshal([]byte(averageConfig.Value), &averageCfg); err == nil { + config["average_config"] = averageCfg + } + } + + if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "subscribe_config").First(&subscribeConfig).Error; err == nil { + var subscribeCfg map[string]interface{} + if err := json.Unmarshal([]byte(subscribeConfig.Value), &subscribeCfg); err == nil { + config["subscribe_config"] = subscribeCfg + } + } + + if err := l.svcCtx.DB.Where("`category` = 'group' and `key` = ?", "traffic_config").First(&trafficConfig).Error; err == nil { + var trafficCfg map[string]interface{} + if err := json.Unmarshal([]byte(trafficConfig.Value), &trafficCfg); err == nil { + config["traffic_config"] = trafficCfg + } + } + + // 解析基础配置 + enabled := enabledConfig.Value == "true" + mode := modeConfig.Value + if mode == "" { + mode = "average" // 默认模式 + } + + // 获取重算状态 + state, err := l.getRecalculationState() + if err != nil { + l.Errorw("failed to get recalculation state", logger.Field("error", err.Error())) + // 继续执行,不影响配置获取 + state = &types.RecalculationState{ + State: "idle", + Progress: 0, + Total: 0, + } + } + + resp = &types.GetGroupConfigResponse{ + Enabled: enabled, + Mode: mode, + Config: config, + State: *state, + } + + return resp, nil +} + +// getRecalculationState 获取重算状态 +func (l *GetGroupConfigLogic) getRecalculationState() (*types.RecalculationState, error) { + var history group.GroupHistory + err := l.svcCtx.DB.Order("id desc").First(&history).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return &types.RecalculationState{ + State: "idle", + Progress: 0, + Total: 0, + }, nil + } + return nil, err + } + + state := &types.RecalculationState{ + State: history.State, + Progress: history.TotalUsers, + Total: history.TotalUsers, + } + + return state, 
nil +} diff --git a/internal/logic/admin/group/getGroupHistoryDetailLogic.go b/internal/logic/admin/group/getGroupHistoryDetailLogic.go new file mode 100644 index 0000000..d868d55 --- /dev/null +++ b/internal/logic/admin/group/getGroupHistoryDetailLogic.go @@ -0,0 +1,109 @@ +package group + +import ( + "context" + "encoding/json" + "errors" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "gorm.io/gorm" +) + +type GetGroupHistoryDetailLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewGetGroupHistoryDetailLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetGroupHistoryDetailLogic { + return &GetGroupHistoryDetailLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GetGroupHistoryDetailLogic) GetGroupHistoryDetail(req *types.GetGroupHistoryDetailRequest) (resp *types.GetGroupHistoryDetailResponse, err error) { + // 查询分组历史记录 + var history group.GroupHistory + if err := l.svcCtx.DB.Where("id = ?", req.Id).First(&history).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, errors.New("group history not found") + } + logger.Errorf("failed to find group history: %v", err) + return nil, err + } + + // 查询分组历史详情 + var details []group.GroupHistoryDetail + if err := l.svcCtx.DB.Where("history_id = ?", req.Id).Find(&details).Error; err != nil { + logger.Errorf("failed to find group history details: %v", err) + return nil, err + } + + // 转换时间格式 + var startTime, endTime *int64 + if history.StartTime != nil { + t := history.StartTime.Unix() + startTime = &t + } + if history.EndTime != nil { + t := history.EndTime.Unix() + endTime = &t + } + + // 构建 GroupHistoryDetail + historyDetail := types.GroupHistoryDetail{ + GroupHistory: types.GroupHistory{ + Id: history.Id, + GroupMode: history.GroupMode, + 
TriggerType: history.TriggerType, + TotalUsers: history.TotalUsers, + SuccessCount: history.SuccessCount, + FailedCount: history.FailedCount, + StartTime: startTime, + EndTime: endTime, + ErrorLog: history.ErrorMessage, + CreatedAt: history.CreatedAt.Unix(), + }, + } + + // 如果有详情记录,构建 ConfigSnapshot + if len(details) > 0 { + configSnapshot := make(map[string]interface{}) + configSnapshot["group_details"] = details + + // 获取配置快照(从 system_config 读取) + var configValue string + if history.GroupMode == "average" { + l.svcCtx.DB.Table("system_config"). + Where("`key` = ?", "group.average_config"). + Select("value"). + Scan(&configValue) + } else if history.GroupMode == "traffic" { + l.svcCtx.DB.Table("system_config"). + Where("`key` = ?", "group.traffic_config"). + Select("value"). + Scan(&configValue) + } + + // 解析 JSON 配置 + if configValue != "" { + var config map[string]interface{} + if err := json.Unmarshal([]byte(configValue), &config); err == nil { + configSnapshot["config"] = config + } + } + + historyDetail.ConfigSnapshot = configSnapshot + } + + resp = &types.GetGroupHistoryDetailResponse{ + GroupHistoryDetail: historyDetail, + } + + return resp, nil +} diff --git a/internal/logic/admin/group/getGroupHistoryLogic.go b/internal/logic/admin/group/getGroupHistoryLogic.go new file mode 100644 index 0000000..6eee9c3 --- /dev/null +++ b/internal/logic/admin/group/getGroupHistoryLogic.go @@ -0,0 +1,87 @@ +package group + +import ( + "context" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" +) + +type GetGroupHistoryLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewGetGroupHistoryLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetGroupHistoryLogic { + return &GetGroupHistoryLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l 
*GetGroupHistoryLogic) GetGroupHistory(req *types.GetGroupHistoryRequest) (resp *types.GetGroupHistoryResponse, err error) { + var histories []group.GroupHistory + var total int64 + + // 构建查询 + query := l.svcCtx.DB.Model(&group.GroupHistory{}) + + // 添加过滤条件 + if req.GroupMode != "" { + query = query.Where("group_mode = ?", req.GroupMode) + } + if req.TriggerType != "" { + query = query.Where("trigger_type = ?", req.TriggerType) + } + + // 获取总数 + if err := query.Count(&total).Error; err != nil { + logger.Errorf("failed to count group histories: %v", err) + return nil, err + } + + // 分页查询 + offset := (req.Page - 1) * req.Size + if err := query.Order("id DESC").Offset(offset).Limit(req.Size).Find(&histories).Error; err != nil { + logger.Errorf("failed to find group histories: %v", err) + return nil, err + } + + // 转换为响应格式 + var list []types.GroupHistory + for _, h := range histories { + var startTime, endTime *int64 + if h.StartTime != nil { + t := h.StartTime.Unix() + startTime = &t + } + if h.EndTime != nil { + t := h.EndTime.Unix() + endTime = &t + } + + list = append(list, types.GroupHistory{ + Id: h.Id, + GroupMode: h.GroupMode, + TriggerType: h.TriggerType, + TotalUsers: h.TotalUsers, + SuccessCount: h.SuccessCount, + FailedCount: h.FailedCount, + StartTime: startTime, + EndTime: endTime, + ErrorLog: h.ErrorMessage, + CreatedAt: h.CreatedAt.Unix(), + }) + } + + resp = &types.GetGroupHistoryResponse{ + Total: total, + List: list, + } + + return resp, nil +} diff --git a/internal/logic/admin/group/getNodeGroupListLogic.go b/internal/logic/admin/group/getNodeGroupListLogic.go new file mode 100644 index 0000000..9595393 --- /dev/null +++ b/internal/logic/admin/group/getNodeGroupListLogic.go @@ -0,0 +1,103 @@ +package group + +import ( + "context" + "fmt" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/node" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" 
+ "github.com/perfect-panel/server/pkg/logger" +) + +type GetNodeGroupListLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewGetNodeGroupListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetNodeGroupListLogic { + return &GetNodeGroupListLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GetNodeGroupListLogic) GetNodeGroupList(req *types.GetNodeGroupListRequest) (resp *types.GetNodeGroupListResponse, err error) { + var nodeGroups []group.NodeGroup + var total int64 + + // 构建查询 + query := l.svcCtx.DB.Model(&group.NodeGroup{}) + + // 获取总数 + if err := query.Count(&total).Error; err != nil { + logger.Errorf("failed to count node groups: %v", err) + return nil, err + } + + // 分页查询 + offset := (req.Page - 1) * req.Size + if err := query.Order("sort ASC").Offset(offset).Limit(req.Size).Find(&nodeGroups).Error; err != nil { + logger.Errorf("failed to find node groups: %v", err) + return nil, err + } + + // 转换为响应格式 + var list []types.NodeGroup + for _, ng := range nodeGroups { + // 统计该组的节点数(JSON数组查询) + var nodeCount int64 + l.svcCtx.DB.Model(&node.Node{}).Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", ng.Id)).Count(&nodeCount) + + // 处理指针类型的字段 + var forCalculation bool + if ng.ForCalculation != nil { + forCalculation = *ng.ForCalculation + } else { + forCalculation = true // 默认值 + } + + var isExpiredGroup bool + if ng.IsExpiredGroup != nil { + isExpiredGroup = *ng.IsExpiredGroup + } + + var minTrafficGB, maxTrafficGB, maxTrafficGBExpired int64 + if ng.MinTrafficGB != nil { + minTrafficGB = *ng.MinTrafficGB + } + if ng.MaxTrafficGB != nil { + maxTrafficGB = *ng.MaxTrafficGB + } + if ng.MaxTrafficGBExpired != nil { + maxTrafficGBExpired = *ng.MaxTrafficGBExpired + } + + list = append(list, types.NodeGroup{ + Id: ng.Id, + Name: ng.Name, + Description: ng.Description, + Sort: ng.Sort, + ForCalculation: forCalculation, + IsExpiredGroup: isExpiredGroup, + ExpiredDaysLimit: 
ng.ExpiredDaysLimit, + MaxTrafficGBExpired: maxTrafficGBExpired, + SpeedLimit: ng.SpeedLimit, + MinTrafficGB: minTrafficGB, + MaxTrafficGB: maxTrafficGB, + NodeCount: nodeCount, + CreatedAt: ng.CreatedAt.Unix(), + UpdatedAt: ng.UpdatedAt.Unix(), + }) + } + + resp = &types.GetNodeGroupListResponse{ + Total: total, + List: list, + } + + return resp, nil +} diff --git a/internal/logic/admin/group/getRecalculationStatusLogic.go b/internal/logic/admin/group/getRecalculationStatusLogic.go new file mode 100644 index 0000000..9a04f80 --- /dev/null +++ b/internal/logic/admin/group/getRecalculationStatusLogic.go @@ -0,0 +1,57 @@ +package group + +import ( + "context" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type GetRecalculationStatusLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Get recalculation status +func NewGetRecalculationStatusLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetRecalculationStatusLogic { + return &GetRecalculationStatusLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GetRecalculationStatusLogic) GetRecalculationStatus() (resp *types.RecalculationState, err error) { + // 返回最近的一条 GroupHistory 记录 + var history group.GroupHistory + err = l.svcCtx.DB.Order("id desc").First(&history).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + // 如果没有历史记录,返回空闲状态 + resp = &types.RecalculationState{ + State: "idle", + Progress: 0, + Total: 0, + } + return resp, nil + } + l.Errorw("failed to get group history", logger.Field("error", err.Error())) + return nil, err + } + + // 转换为 RecalculationState 格式 + // Progress = 已处理的用户数(成功+失败),Total = 总用户数 + processedUsers := history.SuccessCount + history.FailedCount + resp = &types.RecalculationState{ + 
State: history.State, + Progress: processedUsers, + Total: history.TotalUsers, + } + + return resp, nil +} diff --git a/internal/logic/admin/group/getSubscribeGroupMappingLogic.go b/internal/logic/admin/group/getSubscribeGroupMappingLogic.go new file mode 100644 index 0000000..cd26305 --- /dev/null +++ b/internal/logic/admin/group/getSubscribeGroupMappingLogic.go @@ -0,0 +1,71 @@ +package group + +import ( + "context" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/subscribe" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" +) + +type GetSubscribeGroupMappingLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Get subscribe group mapping +func NewGetSubscribeGroupMappingLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetSubscribeGroupMappingLogic { + return &GetSubscribeGroupMappingLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GetSubscribeGroupMappingLogic) GetSubscribeGroupMapping(req *types.GetSubscribeGroupMappingRequest) (resp *types.GetSubscribeGroupMappingResponse, err error) { + // 1. 查询所有订阅套餐 + var subscribes []subscribe.Subscribe + if err := l.svcCtx.DB.Model(&subscribe.Subscribe{}).Find(&subscribes).Error; err != nil { + l.Errorw("[GetSubscribeGroupMapping] failed to query subscribes", logger.Field("error", err.Error())) + return nil, err + } + + // 2. 查询所有节点组 + var nodeGroups []group.NodeGroup + if err := l.svcCtx.DB.Model(&group.NodeGroup{}).Find(&nodeGroups).Error; err != nil { + l.Errorw("[GetSubscribeGroupMapping] failed to query node groups", logger.Field("error", err.Error())) + return nil, err + } + + // 创建 node_group_id -> node_group_name 的映射 + nodeGroupMap := make(map[int64]string) + for _, ng := range nodeGroups { + nodeGroupMap[ng.Id] = ng.Name + } + + // 3. 
构建映射结果:套餐 -> 默认节点组(一对一) + var mappingList []types.SubscribeGroupMappingItem + + for _, sub := range subscribes { + // 获取套餐的默认节点组(node_group_ids 数组的第一个) + nodeGroupName := "" + if len(sub.NodeGroupIds) > 0 { + defaultNodeGroupId := sub.NodeGroupIds[0] + nodeGroupName = nodeGroupMap[defaultNodeGroupId] + } + + mappingList = append(mappingList, types.SubscribeGroupMappingItem{ + SubscribeName: sub.Name, + NodeGroupName: nodeGroupName, + }) + } + + resp = &types.GetSubscribeGroupMappingResponse{ + List: mappingList, + } + + return resp, nil +} diff --git a/internal/logic/admin/group/previewUserNodesLogic.go b/internal/logic/admin/group/previewUserNodesLogic.go new file mode 100644 index 0000000..53b32da --- /dev/null +++ b/internal/logic/admin/group/previewUserNodesLogic.go @@ -0,0 +1,577 @@ +package group + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/node" + "github.com/perfect-panel/server/internal/model/subscribe" + "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/tool" +) + +type PreviewUserNodesLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewPreviewUserNodesLogic(ctx context.Context, svcCtx *svc.ServiceContext) *PreviewUserNodesLogic { + return &PreviewUserNodesLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *PreviewUserNodesLogic) PreviewUserNodes(req *types.PreviewUserNodesRequest) (resp *types.PreviewUserNodesResponse, err error) { + logger.Infof("[PreviewUserNodes] userId: %v", req.UserId) + + // 1. 
查询用户的所有有效订阅(只查询可用状态:0-Pending, 1-Active) + type UserSubscribe struct { + Id int64 + UserId int64 + SubscribeId int64 + NodeGroupId int64 // 用户订阅的 node_group_id(单个ID) + } + var userSubscribes []UserSubscribe + err = l.svcCtx.DB.Model(&user.Subscribe{}). + Select("id, user_id, subscribe_id, node_group_id"). + Where("user_id = ? AND status IN ?", req.UserId, []int8{0, 1}). + Find(&userSubscribes).Error + if err != nil { + logger.Errorf("[PreviewUserNodes] failed to get user subscribes: %v", err) + return nil, err + } + + if len(userSubscribes) == 0 { + logger.Infof("[PreviewUserNodes] no user subscribes found") + resp = &types.PreviewUserNodesResponse{ + UserId: req.UserId, + NodeGroups: []types.NodeGroupItem{}, + } + return resp, nil + } + + logger.Infof("[PreviewUserNodes] found %v user subscribes", len(userSubscribes)) + + // 2. 按优先级获取 node_group_id:user_subscribe.node_group_id > subscribe.node_group_id > subscribe.node_group_ids[0] + // 收集所有订阅ID以便批量查询 + subscribeIds := make([]int64, len(userSubscribes)) + for i, us := range userSubscribes { + subscribeIds[i] = us.SubscribeId + } + + // 批量查询订阅信息 + type SubscribeInfo struct { + Id int64 + NodeGroupId int64 + NodeGroupIds string // JSON string + Nodes string // JSON string - 直接分配的节点ID + NodeTags string // 节点标签 + } + var subscribeInfos []SubscribeInfo + err = l.svcCtx.DB.Model(&subscribe.Subscribe{}). + Select("id, node_group_id, node_group_ids, nodes, node_tags"). + Where("id IN ?", subscribeIds). 
+ Find(&subscribeInfos).Error + if err != nil { + logger.Errorf("[PreviewUserNodes] failed to get subscribe infos: %v", err) + return nil, err + } + + // 创建 subscribe_id -> SubscribeInfo 的映射 + subInfoMap := make(map[int64]SubscribeInfo) + for _, si := range subscribeInfos { + subInfoMap[si.Id] = si + } + + // 按优先级获取每个用户订阅的 node_group_id + var allNodeGroupIds []int64 + for _, us := range userSubscribes { + nodeGroupId := int64(0) + + // 优先级1: user_subscribe.node_group_id + if us.NodeGroupId != 0 { + nodeGroupId = us.NodeGroupId + logger.Debugf("[PreviewUserNodes] user_subscribe_id=%d using node_group_id=%d", us.Id, nodeGroupId) + } else { + // 优先级2: subscribe.node_group_id + subInfo, ok := subInfoMap[us.SubscribeId] + if ok { + if subInfo.NodeGroupId != 0 { + nodeGroupId = subInfo.NodeGroupId + logger.Debugf("[PreviewUserNodes] user_subscribe_id=%d using subscribe.node_group_id=%d", us.Id, nodeGroupId) + } else if subInfo.NodeGroupIds != "" && subInfo.NodeGroupIds != "null" && subInfo.NodeGroupIds != "[]" { + // 优先级3: subscribe.node_group_ids[0] + var nodeGroupIds []int64 + if err := json.Unmarshal([]byte(subInfo.NodeGroupIds), &nodeGroupIds); err == nil && len(nodeGroupIds) > 0 { + nodeGroupId = nodeGroupIds[0] + logger.Debugf("[PreviewUserNodes] user_subscribe_id=%d using subscribe.node_group_ids[0]=%d", us.Id, nodeGroupId) + } + } + } + } + + if nodeGroupId != 0 { + allNodeGroupIds = append(allNodeGroupIds, nodeGroupId) + } + } + + // 去重 + allNodeGroupIds = removeDuplicateInt64(allNodeGroupIds) + + logger.Infof("[PreviewUserNodes] collected node_group_ids with priority: %v", allNodeGroupIds) + + // 3. 
收集所有订阅中直接分配的节点ID + var allDirectNodeIds []int64 + for _, subInfo := range subscribeInfos { + if subInfo.Nodes != "" && subInfo.Nodes != "null" { + // nodes 是逗号分隔的字符串,如 "1,2,3" + nodeIdStrs := strings.Split(subInfo.Nodes, ",") + for _, idStr := range nodeIdStrs { + idStr = strings.TrimSpace(idStr) + if idStr != "" { + var nodeId int64 + if _, err := fmt.Sscanf(idStr, "%d", &nodeId); err == nil { + allDirectNodeIds = append(allDirectNodeIds, nodeId) + } + } + } + logger.Debugf("[PreviewUserNodes] subscribe_id=%d has direct nodes: %s", subInfo.Id, subInfo.Nodes) + } + } + // 去重 + allDirectNodeIds = removeDuplicateInt64(allDirectNodeIds) + logger.Infof("[PreviewUserNodes] collected direct node_ids: %v", allDirectNodeIds) + + // 4. 判断分组功能是否启用 + type SystemConfig struct { + Value string + } + var config SystemConfig + l.svcCtx.DB.Model(&struct { + Category string `gorm:"column:category"` + Key string `gorm:"column:key"` + Value string `gorm:"column:value"` + }{}). + Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). + Select("value"). + Scan(&config) + + logger.Infof("[PreviewUserNodes] groupEnabled: %v", config.Value) + + isGroupEnabled := config.Value == "true" || config.Value == "1" + + var filteredNodes []node.Node + + if isGroupEnabled { + // === 启用分组功能:通过用户订阅的 node_group_id 查询节点 === + logger.Infof("[PreviewUserNodes] using group-based node filtering") + + if len(allNodeGroupIds) == 0 && len(allDirectNodeIds) == 0 { + logger.Infof("[PreviewUserNodes] no node groups and no direct nodes found in user subscribes") + resp = &types.PreviewUserNodesResponse{ + UserId: req.UserId, + NodeGroups: []types.NodeGroupItem{}, + } + return resp, nil + } + + // 5. 查询所有启用的节点(只有当有节点组时才查询) + if len(allNodeGroupIds) > 0 { + var dbNodes []node.Node + err = l.svcCtx.DB.Model(&node.Node{}). + Where("enabled = ?", true). + Find(&dbNodes).Error + if err != nil { + logger.Errorf("[PreviewUserNodes] failed to get nodes: %v", err) + return nil, err + } + + // 6. 
过滤出包含至少一个匹配节点组的节点(仅显示用户真正所在分组的节点,不包含公共节点) + for _, n := range dbNodes { + // 节点未配置节点组(公共节点),预览时不显示 + if len(n.NodeGroupIds) == 0 { + continue + } + + // 检查节点的 node_group_ids 是否与订阅的 node_group_id 有交集 + for _, nodeGroupId := range n.NodeGroupIds { + if tool.Contains(allNodeGroupIds, nodeGroupId) { + filteredNodes = append(filteredNodes, n) + break + } + } + } + + logger.Infof("[PreviewUserNodes] found %v nodes using group filter", len(filteredNodes)) + } + + } else { + // === 未启用分组功能:通过订阅的 node_tags 查询节点 === + logger.Infof("[PreviewUserNodes] using tag-based node filtering") + + // 从已查询的 subscribeInfos 中获取 node_tags + var allTags []string + for _, subInfo := range subscribeInfos { + if subInfo.NodeTags != "" { + tags := strings.Split(subInfo.NodeTags, ",") + allTags = append(allTags, tags...) + } + } + // 去重 + allTags = tool.RemoveDuplicateElements(allTags...) + // 去除空字符串 + allTags = tool.RemoveStringElement(allTags, "") + + logger.Infof("[PreviewUserNodes] merged tags from subscribes: %v", allTags) + + if len(allTags) == 0 && len(allDirectNodeIds) == 0 { + logger.Infof("[PreviewUserNodes] no tags and no direct nodes found in subscribes") + resp = &types.PreviewUserNodesResponse{ + UserId: req.UserId, + NodeGroups: []types.NodeGroupItem{}, + } + return resp, nil + } + + // 8. 查询所有启用的节点(只有当有 tags 时才查询) + if len(allTags) > 0 { + var dbNodes []node.Node + err = l.svcCtx.DB.Model(&node.Node{}). + Where("enabled = ?", true). + Find(&dbNodes).Error + if err != nil { + logger.Errorf("[PreviewUserNodes] failed to get nodes: %v", err) + return nil, err + } + + // 9. 过滤出包含至少一个匹配标签的节点 + for _, n := range dbNodes { + if n.Tags == "" { + continue + } + nodeTags := strings.Split(n.Tags, ",") + // 检查是否有交集 + for _, tag := range nodeTags { + if tag != "" && tool.Contains(allTags, tag) { + filteredNodes = append(filteredNodes, n) + break + } + } + } + + logger.Infof("[PreviewUserNodes] found %v nodes using tag filter", len(filteredNodes)) + } + } + + // 10. 
根据是否启用分组功能,选择不同的分组方式 + nodeGroupItems := make([]types.NodeGroupItem, 0) + + if isGroupEnabled { + // === 启用分组:按节点组分组 === + // 转换为 types.Node 并按节点组分组 + type NodeWithGroup struct { + Node node.Node + NodeGroupIds []int64 + } + + nodesWithGroup := make([]NodeWithGroup, 0, len(filteredNodes)) + for _, n := range filteredNodes { + nodesWithGroup = append(nodesWithGroup, NodeWithGroup{ + Node: n, + NodeGroupIds: n.NodeGroupIds, + }) + } + + // 按节点组分组节点 + type NodeGroupMap struct { + Id int64 + Nodes []types.Node + } + + // 创建节点组映射:group_id -> nodes + groupMap := make(map[int64]*NodeGroupMap) + + // 获取所有涉及的节点组ID + allGroupIds := make([]int64, 0) + for _, ng := range nodesWithGroup { + if len(ng.NodeGroupIds) > 0 { + // 如果节点属于节点组,按第一个节点组分组 + firstGroupId := ng.NodeGroupIds[0] + if _, exists := groupMap[firstGroupId]; !exists { + groupMap[firstGroupId] = &NodeGroupMap{ + Id: firstGroupId, + Nodes: []types.Node{}, + } + allGroupIds = append(allGroupIds, firstGroupId) + } + + // 转换节点 + tags := []string{} + if ng.Node.Tags != "" { + tags = strings.Split(ng.Node.Tags, ",") + } + node := types.Node{ + Id: ng.Node.Id, + Name: ng.Node.Name, + Tags: tags, + Port: ng.Node.Port, + Address: ng.Node.Address, + ServerId: ng.Node.ServerId, + Protocol: ng.Node.Protocol, + Enabled: ng.Node.Enabled, + Sort: ng.Node.Sort, + NodeGroupIds: []int64(ng.Node.NodeGroupIds), + CreatedAt: ng.Node.CreatedAt.Unix(), + UpdatedAt: ng.Node.UpdatedAt.Unix(), + } + + groupMap[firstGroupId].Nodes = append(groupMap[firstGroupId].Nodes, node) + } else { + // 没有节点组的节点,使用 group_id = 0 作为"无节点组"分组 + if _, exists := groupMap[0]; !exists { + groupMap[0] = &NodeGroupMap{ + Id: 0, + Nodes: []types.Node{}, + } + } + + tags := []string{} + if ng.Node.Tags != "" { + tags = strings.Split(ng.Node.Tags, ",") + } + node := types.Node{ + Id: ng.Node.Id, + Name: ng.Node.Name, + Tags: tags, + Port: ng.Node.Port, + Address: ng.Node.Address, + ServerId: ng.Node.ServerId, + Protocol: ng.Node.Protocol, + Enabled: ng.Node.Enabled, 
+ Sort: ng.Node.Sort, + NodeGroupIds: []int64(ng.Node.NodeGroupIds), + CreatedAt: ng.Node.CreatedAt.Unix(), + UpdatedAt: ng.Node.UpdatedAt.Unix(), + } + + groupMap[0].Nodes = append(groupMap[0].Nodes, node) + } + } + + // 查询节点组信息并构建响应 + nodeGroupInfoMap := make(map[int64]string) + validGroupIds := make([]int64, 0) + + if len(allGroupIds) > 0 { + type NodeGroupInfo struct { + Id int64 + Name string + } + var nodeGroupInfos []NodeGroupInfo + err = l.svcCtx.DB.Model(&group.NodeGroup{}). + Select("id, name"). + Where("id IN ?", allGroupIds). + Find(&nodeGroupInfos).Error + if err != nil { + logger.Errorf("[PreviewUserNodes] failed to get node group infos: %v", err) + return nil, err + } + + logger.Infof("[PreviewUserNodes] found %v node group infos from %v requested", len(nodeGroupInfos), len(allGroupIds)) + + // 创建节点组信息映射和有效节点组ID列表 + for _, ngInfo := range nodeGroupInfos { + nodeGroupInfoMap[ngInfo.Id] = ngInfo.Name + validGroupIds = append(validGroupIds, ngInfo.Id) + logger.Debugf("[PreviewUserNodes] node_group[%d] = %s", ngInfo.Id, ngInfo.Name) + } + + // 记录无效的节点组ID + for _, requestedId := range allGroupIds { + found := false + for _, validId := range validGroupIds { + if requestedId == validId { + found = true + break + } + } + if !found { + logger.Infof("[PreviewUserNodes] node_group_id %d not found in database, treating as public nodes", requestedId) + } + } + } + + // 构建响应:根据有效节点组ID重新分组节点 + publicNodes := make([]types.Node, 0) + + // 遍历所有分组,重新分类节点 + for groupId, gm := range groupMap { + if groupId == 0 { + // 本来就是无节点组的节点 + publicNodes = append(publicNodes, gm.Nodes...) 
+ continue + } + + // 检查这个节点组ID是否有效 + isValid := false + for _, validId := range validGroupIds { + if groupId == validId { + isValid = true + break + } + } + + if isValid { + // 节点组有效,添加到对应的分组 + groupName := nodeGroupInfoMap[groupId] + if groupName == "" { + groupName = fmt.Sprintf("Group %d", groupId) + } + nodeGroupItems = append(nodeGroupItems, types.NodeGroupItem{ + Id: groupId, + Name: groupName, + Nodes: gm.Nodes, + }) + logger.Infof("[PreviewUserNodes] adding node group: id=%d, name=%s, nodes=%d", groupId, groupName, len(gm.Nodes)) + } else { + // 节点组无效,节点归入公共节点组 + logger.Infof("[PreviewUserNodes] node_group_id %d invalid, moving %d nodes to public group", groupId, len(gm.Nodes)) + publicNodes = append(publicNodes, gm.Nodes...) + } + } + + // 预览模式不显示公共节点(node_group_ids 为空的节点),只展示用户真正所在分组的节点 + if len(publicNodes) > 0 { + logger.Infof("[PreviewUserNodes] skipping %d public nodes (not in user's assigned group)", len(publicNodes)) + } + + } else { + // === 未启用分组:按 tag 分组 === + // 按 tag 分组节点 + tagGroupMap := make(map[string][]types.Node) + + for _, n := range filteredNodes { + tags := []string{} + if n.Tags != "" { + tags = strings.Split(n.Tags, ",") + } + + // 转换节点 + node := types.Node{ + Id: n.Id, + Name: n.Name, + Tags: tags, + Port: n.Port, + Address: n.Address, + ServerId: n.ServerId, + Protocol: n.Protocol, + Enabled: n.Enabled, + Sort: n.Sort, + NodeGroupIds: []int64(n.NodeGroupIds), + CreatedAt: n.CreatedAt.Unix(), + UpdatedAt: n.UpdatedAt.Unix(), + } + + // 将节点添加到每个匹配的 tag 分组中 + if len(tags) > 0 { + for _, tag := range tags { + tag = strings.TrimSpace(tag) + if tag != "" { + tagGroupMap[tag] = append(tagGroupMap[tag], node) + } + } + } else { + // 没有 tag 的节点放入特殊分组 + tagGroupMap[""] = append(tagGroupMap[""], node) + } + } + + // 构建响应:按 tag 分组 + for tag, nodes := range tagGroupMap { + nodeGroupItems = append(nodeGroupItems, types.NodeGroupItem{ + Id: 0, // tag 分组使用 ID 0 + Name: tag, + Nodes: nodes, + }) + logger.Infof("[PreviewUserNodes] adding tag group: 
tag=%s, nodes=%d", tag, len(nodes)) + } + } + + // 添加套餐节点组(直接分配的节点) + if len(allDirectNodeIds) > 0 { + // 查询直接分配的节点详情 + var directNodes []node.Node + err = l.svcCtx.DB.Model(&node.Node{}). + Where("id IN ? AND enabled = ?", allDirectNodeIds, true). + Find(&directNodes).Error + if err != nil { + logger.Errorf("[PreviewUserNodes] failed to get direct nodes: %v", err) + return nil, err + } + + if len(directNodes) > 0 { + // 转换为 types.Node + directNodeItems := make([]types.Node, 0, len(directNodes)) + for _, n := range directNodes { + tags := []string{} + if n.Tags != "" { + tags = strings.Split(n.Tags, ",") + } + directNodeItems = append(directNodeItems, types.Node{ + Id: n.Id, + Name: n.Name, + Tags: tags, + Port: n.Port, + Address: n.Address, + ServerId: n.ServerId, + Protocol: n.Protocol, + Enabled: n.Enabled, + Sort: n.Sort, + NodeGroupIds: []int64(n.NodeGroupIds), + CreatedAt: n.CreatedAt.Unix(), + UpdatedAt: n.UpdatedAt.Unix(), + }) + } + + // 添加套餐节点组(使用特殊ID -1,Name 为空字符串,前端根据 ID -1 进行国际化) + nodeGroupItems = append(nodeGroupItems, types.NodeGroupItem{ + Id: -1, + Name: "", // 空字符串,前端根据 ID -1 识别并国际化 + Nodes: directNodeItems, + }) + logger.Infof("[PreviewUserNodes] adding subscription nodes group: nodes=%d", len(directNodeItems)) + } + } + + // 14. 
返回结果 + resp = &types.PreviewUserNodesResponse{ + UserId: req.UserId, + NodeGroups: nodeGroupItems, + } + + logger.Infof("[PreviewUserNodes] returning %v node groups for user %v", len(resp.NodeGroups), req.UserId) + return resp, nil +} + +// removeDuplicateInt64 去重 []int64 +func removeDuplicateInt64(slice []int64) []int64 { + keys := make(map[int64]bool) + var list []int64 + for _, entry := range slice { + if !keys[entry] { + keys[entry] = true + list = append(list, entry) + } + } + return list +} diff --git a/internal/logic/admin/group/recalculateGroupLogic.go b/internal/logic/admin/group/recalculateGroupLogic.go new file mode 100644 index 0000000..9b485b9 --- /dev/null +++ b/internal/logic/admin/group/recalculateGroupLogic.go @@ -0,0 +1,818 @@ +package group + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/node" + "github.com/perfect-panel/server/internal/model/subscribe" + "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type RecalculateGroupLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Recalculate group +func NewRecalculateGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *RecalculateGroupLogic { + return &RecalculateGroupLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *RecalculateGroupLogic) RecalculateGroup(req *types.RecalculateGroupRequest) error { + // 验证 mode 参数 + if req.Mode != "average" && req.Mode != "subscribe" && req.Mode != "traffic" { + return errors.New("invalid mode, must be one of: average, subscribe, traffic") + } + + // 创建 GroupHistory 记录(state=pending) + triggerType := req.TriggerType + if triggerType == "" { + triggerType = "manual" 
// 默认为手动触发 + } + + history := &group.GroupHistory{ + GroupMode: req.Mode, + TriggerType: triggerType, + TotalUsers: 0, + SuccessCount: 0, + FailedCount: 0, + } + now := time.Now() + history.StartTime = &now + + // 使用 GORM Transaction 执行分组重算 + err := l.svcCtx.DB.Transaction(func(tx *gorm.DB) error { + // 创建历史记录 + if err := tx.Create(history).Error; err != nil { + l.Errorw("failed to create group history", logger.Field("error", err.Error())) + return err + } + + // 更新状态为 running + if err := tx.Model(history).Update("state", "running").Error; err != nil { + l.Errorw("failed to update history state to running", logger.Field("error", err.Error())) + return err + } + + // 根据 mode 执行不同的分组算法 + var affectedCount int + var err error + + switch req.Mode { + case "average": + affectedCount, err = l.executeAverageGrouping(tx, history.Id) + if err != nil { + l.Errorw("failed to execute average grouping", logger.Field("error", err.Error())) + return err + } + case "subscribe": + affectedCount, err = l.executeSubscribeGrouping(tx, history.Id) + if err != nil { + l.Errorw("failed to execute subscribe grouping", logger.Field("error", err.Error())) + return err + } + case "traffic": + affectedCount, err = l.executeTrafficGrouping(tx, history.Id) + if err != nil { + l.Errorw("failed to execute traffic grouping", logger.Field("error", err.Error())) + return err + } + } + + // 更新 GroupHistory 记录(state=completed, 统计成功/失败数) + endTime := time.Now() + updates := map[string]interface{}{ + "state": "completed", + "total_users": affectedCount, + "success_count": affectedCount, // 暂时假设所有都成功 + "failed_count": 0, + "end_time": endTime, + } + + if err := tx.Model(history).Updates(updates).Error; err != nil { + l.Errorw("failed to update history state to completed", logger.Field("error", err.Error())) + return err + } + + l.Infof("group recalculation completed: mode=%s, affected_users=%d", req.Mode, affectedCount) + return nil + }) + + if err != nil { + // 如果失败,更新历史记录状态为 failed + updateErr := 
l.svcCtx.DB.Model(history).Updates(map[string]interface{}{ + "state": "failed", + "error_message": err.Error(), + "end_time": time.Now(), + }).Error + if updateErr != nil { + l.Errorw("failed to update history state to failed", logger.Field("error", updateErr.Error())) + } + return err + } + + return nil +} + +// getUserEmail 查询用户的邮箱 +func (l *RecalculateGroupLogic) getUserEmail(tx *gorm.DB, userId int64) string { + type UserAuthMethod struct { + AuthIdentifier string `json:"auth_identifier"` + } + + var authMethod UserAuthMethod + if err := tx.Model(&user.AuthMethods{}). + Select("auth_identifier"). + Where("user_id = ? AND (auth_type = ? OR auth_type = ?)", userId, "email", "6"). + First(&authMethod).Error; err != nil { + return "" + } + + return authMethod.AuthIdentifier +} + +// executeAverageGrouping 实现平均分组算法(随机分配节点组到用户订阅) +// 新逻辑:获取所有有效用户订阅,从订阅的节点组ID中随机选择一个,设置到用户订阅的 node_group_id 字段 +func (l *RecalculateGroupLogic) executeAverageGrouping(tx *gorm.DB, historyId int64) (int, error) { + // 1. 查询所有有效且未锁定的用户订阅(status IN (0, 1)) + type UserSubscribeInfo struct { + Id int64 `json:"id"` + UserId int64 `json:"user_id"` + SubscribeId int64 `json:"subscribe_id"` + } + + var userSubscribes []UserSubscribeInfo + if err := tx.Model(&user.Subscribe{}). + Select("id, user_id, subscribe_id"). + Where("group_locked = ? AND status IN (0, 1)", 0). // 只查询未锁定且有效的用户订阅 + Scan(&userSubscribes).Error; err != nil { + return 0, err + } + + if len(userSubscribes) == 0 { + l.Infof("average grouping: no valid and unlocked user subscribes found") + return 0, nil + } + + l.Infof("average grouping: found %d valid and unlocked user subscribes", len(userSubscribes)) + + // 1.5 查询所有参与计算的节点组ID + var calculationNodeGroups []group.NodeGroup + if err := tx.Model(&group.NodeGroup{}). + Select("id"). + Where("for_calculation = ?", true). 
+ Scan(&calculationNodeGroups).Error; err != nil { + l.Errorw("failed to query calculation node groups", logger.Field("error", err.Error())) + return 0, err + } + + // 创建参与计算的节点组ID集合(用于快速查找) + calculationNodeGroupIds := make(map[int64]bool) + for _, ng := range calculationNodeGroups { + calculationNodeGroupIds[ng.Id] = true + } + + l.Infof("average grouping: found %d node groups with for_calculation=true", len(calculationNodeGroupIds)) + + // 2. 批量查询订阅的节点组ID信息 + subscribeIds := make([]int64, len(userSubscribes)) + for i, us := range userSubscribes { + subscribeIds[i] = us.SubscribeId + } + + type SubscribeInfo struct { + Id int64 `json:"id"` + NodeGroupIds string `json:"node_group_ids"` // JSON string + } + var subscribeInfos []SubscribeInfo + if err := tx.Model(&subscribe.Subscribe{}). + Select("id, node_group_ids"). + Where("id IN ?", subscribeIds). + Find(&subscribeInfos).Error; err != nil { + l.Errorw("failed to query subscribe infos", logger.Field("error", err.Error())) + return 0, err + } + + // 创建 subscribe_id -> SubscribeInfo 的映射 + subInfoMap := make(map[int64]SubscribeInfo) + for _, si := range subscribeInfos { + subInfoMap[si.Id] = si + } + + // 用于存储统计信息(按节点组ID统计用户数) + groupUsersMap := make(map[int64][]struct { + Id int64 `json:"id"` + Email string `json:"email"` + }) + nodeGroupUserCount := make(map[int64]int) // node_group_id -> user_count + nodeGroupNodeCount := make(map[int64]int) // node_group_id -> node_count + + // 3. 
遍历所有用户订阅,按序平均分配节点组 + affectedCount := 0 + failedCount := 0 + + // 为每个订阅维护一个分配索引,用于按序循环分配 + subscribeAllocationIndex := make(map[int64]int) // subscribe_id -> current_index + + for _, us := range userSubscribes { + subInfo, ok := subInfoMap[us.SubscribeId] + if !ok { + l.Infow("subscribe not found", + logger.Field("user_subscribe_id", us.Id), + logger.Field("subscribe_id", us.SubscribeId)) + failedCount++ + continue + } + + // 解析订阅的节点组ID列表,并过滤出参与计算的节点组 + var nodeGroupIds []int64 + if subInfo.NodeGroupIds != "" && subInfo.NodeGroupIds != "[]" { + var allNodeGroupIds []int64 + if err := json.Unmarshal([]byte(subInfo.NodeGroupIds), &allNodeGroupIds); err != nil { + l.Errorw("failed to parse node_group_ids", + logger.Field("subscribe_id", subInfo.Id), + logger.Field("node_group_ids", subInfo.NodeGroupIds), + logger.Field("error", err.Error())) + failedCount++ + continue + } + + // 只保留参与计算的节点组 + for _, ngId := range allNodeGroupIds { + if calculationNodeGroupIds[ngId] { + nodeGroupIds = append(nodeGroupIds, ngId) + } + } + + if len(nodeGroupIds) == 0 && len(allNodeGroupIds) > 0 { + l.Debugw("all node_group_ids are not for calculation, setting to 0", + logger.Field("subscribe_id", subInfo.Id), + logger.Field("total_node_groups", len(allNodeGroupIds))) + } + } + + // 如果没有节点组ID,跳过 + if len(nodeGroupIds) == 0 { + l.Debugf("no valid node_group_ids for subscribe_id=%d, setting to 0", subInfo.Id) + if err := tx.Model(&user.Subscribe{}). + Where("id = ?", us.Id). 
+ Update("node_group_id", 0).Error; err != nil { + l.Errorw("failed to update user_subscribe node_group_id", + logger.Field("user_subscribe_id", us.Id), + logger.Field("error", err.Error())) + failedCount++ + continue + } + } + + // 按序选择节点组ID(循环轮询分配) + selectedNodeGroupId := int64(0) + if len(nodeGroupIds) > 0 { + // 获取当前订阅的分配索引 + currentIndex := subscribeAllocationIndex[us.SubscribeId] + // 选择当前索引对应的节点组 + selectedNodeGroupId = nodeGroupIds[currentIndex] + // 更新索引,循环使用(轮询) + subscribeAllocationIndex[us.SubscribeId] = (currentIndex + 1) % len(nodeGroupIds) + + l.Debugf("assigning user_subscribe_id=%d (subscribe_id=%d) to node_group_id=%d (index=%d, total_options=%d, mode=sequential)", + us.Id, us.SubscribeId, selectedNodeGroupId, currentIndex, len(nodeGroupIds)) + } + + // 更新 user_subscribe 的 node_group_id 字段(单个ID) + if err := tx.Model(&user.Subscribe{}). + Where("id = ?", us.Id). + Update("node_group_id", selectedNodeGroupId).Error; err != nil { + l.Errorw("failed to update user_subscribe node_group_id", + logger.Field("user_subscribe_id", us.Id), + logger.Field("error", err.Error())) + failedCount++ + continue + } + + // 只统计有节点组的用户 + if selectedNodeGroupId > 0 { + // 查询用户邮箱,用于保存到历史记录 + email := l.getUserEmail(tx, us.UserId) + groupUsersMap[selectedNodeGroupId] = append(groupUsersMap[selectedNodeGroupId], struct { + Id int64 `json:"id"` + Email string `json:"email"` + }{ + Id: us.UserId, + Email: email, + }) + nodeGroupUserCount[selectedNodeGroupId]++ + } + + affectedCount++ + } + + l.Infof("average grouping completed: affected=%d, failed=%d", affectedCount, failedCount) + + // 4. 创建分组历史详情记录(按节点组ID统计) + for nodeGroupId, users := range groupUsersMap { + userCount := len(users) + if userCount == 0 { + continue + } + + // 统计该节点组的节点数 + var nodeCount int64 = 0 + if nodeGroupId > 0 { + if err := tx.Model(&node.Node{}). + Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", nodeGroupId)). 
+ Count(&nodeCount).Error; err != nil { + l.Errorw("failed to count nodes", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + } + nodeGroupNodeCount[nodeGroupId] = int(nodeCount) + + // 序列化用户信息为 JSON + userDataJSON := "[]" + if jsonData, err := json.Marshal(users); err == nil { + userDataJSON = string(jsonData) + } else { + l.Errorw("failed to marshal user data", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + + // 创建历史详情(使用 node_group_id 作为分组标识) + detail := &group.GroupHistoryDetail{ + HistoryId: historyId, + NodeGroupId: nodeGroupId, + UserCount: userCount, + NodeCount: int(nodeCount), + UserData: userDataJSON, + } + + if err := tx.Create(detail).Error; err != nil { + l.Errorw("failed to create group history detail", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + + l.Infof("Average Group (node_group_id=%d): users=%d, nodes=%d", + nodeGroupId, userCount, nodeCount) + } + + return affectedCount, nil +} + +// executeSubscribeGrouping 实现基于订阅套餐的分组算法 +// 逻辑:查询有效订阅 → 获取订阅的 node_group_ids → 取第一个 node_group_id(如果有) → 更新 user_subscribe.node_group_id +// 订阅过期的用户 → 设置 node_group_id 为 0 +func (l *RecalculateGroupLogic) executeSubscribeGrouping(tx *gorm.DB, historyId int64) (int, error) { + // 1. 查询所有有效且未锁定的用户订阅(status IN (0, 1), group_locked = 0) + type UserSubscribeInfo struct { + Id int64 `json:"id"` + UserId int64 `json:"user_id"` + SubscribeId int64 `json:"subscribe_id"` + } + + var userSubscribes []UserSubscribeInfo + if err := tx.Model(&user.Subscribe{}). + Select("id, user_id, subscribe_id"). + Where("group_locked = ? AND status IN (0, 1)", 0). 
+ Scan(&userSubscribes).Error; err != nil { + l.Errorw("failed to query user subscribes", logger.Field("error", err.Error())) + return 0, err + } + + if len(userSubscribes) == 0 { + l.Infof("subscribe grouping: no valid and unlocked user subscribes found") + return 0, nil + } + + l.Infof("subscribe grouping: found %d valid and unlocked user subscribes", len(userSubscribes)) + + // 1.5 查询所有参与计算的节点组ID + var calculationNodeGroups []group.NodeGroup + if err := tx.Model(&group.NodeGroup{}). + Select("id"). + Where("for_calculation = ?", true). + Scan(&calculationNodeGroups).Error; err != nil { + l.Errorw("failed to query calculation node groups", logger.Field("error", err.Error())) + return 0, err + } + + // 创建参与计算的节点组ID集合(用于快速查找) + calculationNodeGroupIds := make(map[int64]bool) + for _, ng := range calculationNodeGroups { + calculationNodeGroupIds[ng.Id] = true + } + + l.Infof("subscribe grouping: found %d node groups with for_calculation=true", len(calculationNodeGroupIds)) + + // 2. 批量查询订阅的节点组ID信息 + subscribeIds := make([]int64, len(userSubscribes)) + for i, us := range userSubscribes { + subscribeIds[i] = us.SubscribeId + } + + type SubscribeInfo struct { + Id int64 `json:"id"` + NodeGroupIds string `json:"node_group_ids"` // JSON string + } + var subscribeInfos []SubscribeInfo + if err := tx.Model(&subscribe.Subscribe{}). + Select("id, node_group_ids"). + Where("id IN ?", subscribeIds). 
+ Find(&subscribeInfos).Error; err != nil { + l.Errorw("failed to query subscribe infos", logger.Field("error", err.Error())) + return 0, err + } + + // 创建 subscribe_id -> SubscribeInfo 的映射 + subInfoMap := make(map[int64]SubscribeInfo) + for _, si := range subscribeInfos { + subInfoMap[si.Id] = si + } + + // 用于存储统计信息(按节点组ID统计用户数) + type UserInfo struct { + Id int64 `json:"id"` + Email string `json:"email"` + } + groupUsersMap := make(map[int64][]UserInfo) + nodeGroupUserCount := make(map[int64]int) // node_group_id -> user_count + nodeGroupNodeCount := make(map[int64]int) // node_group_id -> node_count + + // 3. 遍历所有用户订阅,取第一个节点组ID + affectedCount := 0 + failedCount := 0 + + for _, us := range userSubscribes { + subInfo, ok := subInfoMap[us.SubscribeId] + if !ok { + l.Infow("subscribe not found", + logger.Field("user_subscribe_id", us.Id), + logger.Field("subscribe_id", us.SubscribeId)) + failedCount++ + continue + } + + // 解析订阅的节点组ID列表,并过滤出参与计算的节点组 + var nodeGroupIds []int64 + if subInfo.NodeGroupIds != "" && subInfo.NodeGroupIds != "[]" { + var allNodeGroupIds []int64 + if err := json.Unmarshal([]byte(subInfo.NodeGroupIds), &allNodeGroupIds); err != nil { + l.Errorw("failed to parse node_group_ids", + logger.Field("subscribe_id", subInfo.Id), + logger.Field("node_group_ids", subInfo.NodeGroupIds), + logger.Field("error", err.Error())) + failedCount++ + continue + } + + // 只保留参与计算的节点组 + for _, ngId := range allNodeGroupIds { + if calculationNodeGroupIds[ngId] { + nodeGroupIds = append(nodeGroupIds, ngId) + } + } + + if len(nodeGroupIds) == 0 && len(allNodeGroupIds) > 0 { + l.Debugw("all node_group_ids are not for calculation, setting to 0", + logger.Field("subscribe_id", subInfo.Id), + logger.Field("total_node_groups", len(allNodeGroupIds))) + } + } + + // 取第一个参与计算的节点组ID(如果有),否则设置为 0 + selectedNodeGroupId := int64(0) + if len(nodeGroupIds) > 0 { + selectedNodeGroupId = nodeGroupIds[0] + } + + l.Debugf("assigning user_subscribe_id=%d (subscribe_id=%d) to 
node_group_id=%d (total_options=%d, selected_first)", + us.Id, us.SubscribeId, selectedNodeGroupId, len(nodeGroupIds)) + + // 更新 user_subscribe 的 node_group_id 字段 + if err := tx.Model(&user.Subscribe{}). + Where("id = ?", us.Id). + Update("node_group_id", selectedNodeGroupId).Error; err != nil { + l.Errorw("failed to update user_subscribe node_group_id", + logger.Field("user_subscribe_id", us.Id), + logger.Field("error", err.Error())) + failedCount++ + continue + } + + // 只统计有节点组的用户 + if selectedNodeGroupId > 0 { + // 查询用户邮箱,用于保存到历史记录 + email := l.getUserEmail(tx, us.UserId) + groupUsersMap[selectedNodeGroupId] = append(groupUsersMap[selectedNodeGroupId], UserInfo{ + Id: us.UserId, + Email: email, + }) + nodeGroupUserCount[selectedNodeGroupId]++ + } + + affectedCount++ + } + + l.Infof("subscribe grouping completed: affected=%d, failed=%d", affectedCount, failedCount) + + // 4. 处理订阅过期/失效的用户,设置 node_group_id 为 0 + // 查询所有没有有效订阅且未锁定的用户订阅记录 + var expiredUserSubscribes []struct { + Id int64 `json:"id"` + UserId int64 `json:"user_id"` + } + + if err := tx.Raw(` + SELECT us.id, us.user_id + FROM user_subscribe as us + WHERE us.group_locked = 0 + AND us.status NOT IN (0, 1) + `).Scan(&expiredUserSubscribes).Error; err != nil { + l.Errorw("failed to query expired user subscribes", logger.Field("error", err.Error())) + // 继续处理,不因为过期用户查询失败而影响 + } else { + l.Infof("found %d expired user subscribes for subscribe-based grouping, will set node_group_id to 0", len(expiredUserSubscribes)) + + expiredAffectedCount := 0 + for _, eu := range expiredUserSubscribes { + // 更新 user_subscribe 表的 node_group_id 字段到 0 + if err := tx.Model(&user.Subscribe{}). + Where("id = ?", eu.Id). 
+ Update("node_group_id", 0).Error; err != nil { + l.Errorw("failed to update expired user subscribe node_group_id", + logger.Field("user_subscribe_id", eu.Id), + logger.Field("error", err.Error())) + continue + } + + expiredAffectedCount++ + } + + l.Infof("expired user subscribes grouping completed: affected=%d", expiredAffectedCount) + } + + // 5. 创建分组历史详情记录(按节点组ID统计) + for nodeGroupId, users := range groupUsersMap { + userCount := len(users) + if userCount == 0 { + continue + } + + // 统计该节点组的节点数 + var nodeCount int64 = 0 + if nodeGroupId > 0 { + if err := tx.Model(&node.Node{}). + Where("JSON_CONTAINS(node_group_ids, ?)", fmt.Sprintf("[%d]", nodeGroupId)). + Count(&nodeCount).Error; err != nil { + l.Errorw("failed to count nodes", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + } + nodeGroupNodeCount[nodeGroupId] = int(nodeCount) + + // 序列化用户信息为 JSON + userDataJSON := "[]" + if jsonData, err := json.Marshal(users); err == nil { + userDataJSON = string(jsonData) + } else { + l.Errorw("failed to marshal user data", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + + // 创建历史详情 + detail := &group.GroupHistoryDetail{ + HistoryId: historyId, + NodeGroupId: nodeGroupId, + UserCount: userCount, + NodeCount: int(nodeCount), + UserData: userDataJSON, + } + + if err := tx.Create(detail).Error; err != nil { + l.Errorw("failed to create group history detail", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + + l.Infof("Subscribe Group (node_group_id=%d): users=%d, nodes=%d", + nodeGroupId, userCount, nodeCount) + } + + return affectedCount, nil +} + +// executeTrafficGrouping 实现基于流量的分组算法 +// 逻辑:根据配置的流量范围,将用户分配到对应的用户组 +func (l *RecalculateGroupLogic) executeTrafficGrouping(tx *gorm.DB, historyId int64) (int, error) { + // 用于存储每个节点组的用户信息(id 和 email) + type UserInfo struct { + Id int64 `json:"id"` + Email string `json:"email"` + } + groupUsersMap := 
make(map[int64][]UserInfo) // node_group_id -> []UserInfo + + // 1. 获取所有设置了流量区间的节点组 + var nodeGroups []group.NodeGroup + if err := tx.Where("for_calculation = ?", true). + Where("max_traffic_gb > 0"). + Find(&nodeGroups).Error; err != nil { + l.Errorw("failed to query node groups", logger.Field("error", err.Error())) + return 0, err + } + + if len(nodeGroups) == 0 { + l.Infow("no node groups with traffic ranges configured") + return 0, nil + } + + l.Infow("executeTrafficGrouping loaded node groups", + logger.Field("node_groups_count", len(nodeGroups))) + + // 2. 查询所有有效且未锁定的用户订阅及其已用流量 + type UserSubscribeInfo struct { + Id int64 + UserId int64 + Upload int64 + Download int64 + UsedTraffic int64 // 已用流量 = upload + download (bytes) + } + + var userSubscribes []UserSubscribeInfo + if err := tx.Model(&user.Subscribe{}). + Select("id, user_id, upload, download, (upload + download) as used_traffic"). + Where("group_locked = ? AND status IN (0, 1)", 0). // 只查询有效且未锁定的用户订阅 + Scan(&userSubscribes).Error; err != nil { + l.Errorw("failed to query user subscribes", logger.Field("error", err.Error())) + return 0, err + } + + if len(userSubscribes) == 0 { + l.Infow("no valid and unlocked user subscribes found") + return 0, nil + } + + l.Infow("found user subscribes for traffic-based grouping", logger.Field("count", len(userSubscribes))) + + // 3. 
根据流量范围分配节点组ID到用户订阅 + affectedCount := 0 + groupUserCount := make(map[int64]int) // node_group_id -> user_count + + for _, us := range userSubscribes { + // 将字节转换为 GB + usedTrafficGB := float64(us.UsedTraffic) / (1024 * 1024 * 1024) + + // 查找匹配的流量范围(使用左闭右开区间 [Min, Max)) + var targetNodeGroupId int64 = 0 + for _, ng := range nodeGroups { + if ng.MinTrafficGB == nil || ng.MaxTrafficGB == nil { + continue + } + minTraffic := float64(*ng.MinTrafficGB) + maxTraffic := float64(*ng.MaxTrafficGB) + + // 检查是否在区间内 [min, max) + if usedTrafficGB >= minTraffic && usedTrafficGB < maxTraffic { + targetNodeGroupId = ng.Id + break + } + } + + // 如果没有匹配到任何范围,targetNodeGroupId 保持为 0(不分配节点组) + + // 更新 user_subscribe 的 node_group_id 字段 + if err := tx.Model(&user.Subscribe{}). + Where("id = ?", us.Id). + Update("node_group_id", targetNodeGroupId).Error; err != nil { + l.Errorw("failed to update user subscribe node_group_id", + logger.Field("user_subscribe_id", us.Id), + logger.Field("target_node_group_id", targetNodeGroupId), + logger.Field("error", err.Error())) + continue + } + + // 只有分配了节点组的用户才记录到历史 + if targetNodeGroupId > 0 { + // 查询用户邮箱,用于保存到历史记录 + email := l.getUserEmail(tx, us.UserId) + userInfo := UserInfo{ + Id: us.UserId, + Email: email, + } + groupUsersMap[targetNodeGroupId] = append(groupUsersMap[targetNodeGroupId], userInfo) + groupUserCount[targetNodeGroupId]++ + + l.Debugf("assigned user subscribe %d (traffic: %.2fGB) to node group %d", + us.Id, usedTrafficGB, targetNodeGroupId) + } else { + l.Debugf("user subscribe %d (traffic: %.2fGB) not assigned to any node group", + us.Id, usedTrafficGB) + } + + affectedCount++ + } + + l.Infof("traffic-based grouping completed: affected_subscribes=%d", affectedCount) + + // 4. 
创建分组历史详情记录(只统计有用户的节点组) + nodeGroupCount := make(map[int64]int) // node_group_id -> node_count + for _, ng := range nodeGroups { + nodeGroupCount[ng.Id] = 1 // 每个节点组计为1 + } + + for nodeGroupId, userCount := range groupUserCount { + userDataJSON, err := json.Marshal(groupUsersMap[nodeGroupId]) + if err != nil { + l.Errorw("failed to marshal user data", + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + continue + } + + detail := group.GroupHistoryDetail{ + HistoryId: historyId, + NodeGroupId: nodeGroupId, + UserCount: userCount, + NodeCount: nodeGroupCount[nodeGroupId], + UserData: string(userDataJSON), + } + if err := tx.Create(&detail).Error; err != nil { + l.Errorw("failed to create group history detail", + logger.Field("history_id", historyId), + logger.Field("node_group_id", nodeGroupId), + logger.Field("error", err.Error())) + } + } + + return affectedCount, nil +} + +// containsIgnoreCase checks if a string contains another substring (case-insensitive) +func containsIgnoreCase(s, substr string) bool { + if len(substr) == 0 { + return true + } + if len(s) < len(substr) { + return false + } + + // Simple case-insensitive contains check + sLower := toLower(s) + substrLower := toLower(substr) + + return contains(sLower, substrLower) +} + +// toLower converts a string to lowercase +func toLower(s string) string { + result := make([]rune, len(s)) + for i, r := range s { + if r >= 'A' && r <= 'Z' { + result[i] = r + ('a' - 'A') + } else { + result[i] = r + } + } + return string(result) +} + +// contains checks if a string contains another substring (case-sensitive) +func contains(s, substr string) bool { + return len(s) >= len(substr) && indexOf(s, substr) >= 0 +} + +// indexOf returns the index of the first occurrence of substr in s, or -1 if not found +func indexOf(s, substr string) int { + n := len(substr) + if n == 0 { + return 0 + } + if n > len(s) { + return -1 + } + + // Simple string search + for i := 0; i <= len(s)-n; i++ { 
+ if s[i:i+n] == substr { + return i + } + } + return -1 +} diff --git a/internal/logic/admin/group/resetGroupsLogic.go b/internal/logic/admin/group/resetGroupsLogic.go new file mode 100644 index 0000000..eaaa098 --- /dev/null +++ b/internal/logic/admin/group/resetGroupsLogic.go @@ -0,0 +1,82 @@ +package group + +import ( + "context" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/node" + "github.com/perfect-panel/server/internal/model/subscribe" + "github.com/perfect-panel/server/internal/model/system" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/pkg/logger" +) + +type ResetGroupsLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// NewResetGroupsLogic Reset all groups (delete all node groups and reset related data) +func NewResetGroupsLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ResetGroupsLogic { + return &ResetGroupsLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *ResetGroupsLogic) ResetGroups() error { + // 1. Delete all node groups + err := l.svcCtx.DB.Where("1 = 1").Delete(&group.NodeGroup{}).Error + if err != nil { + l.Errorw("Failed to delete all node groups", logger.Field("error", err.Error())) + return err + } + l.Infow("Successfully deleted all node groups") + + // 2. Clear node_group_ids for all subscribes (products) + err = l.svcCtx.DB.Model(&subscribe.Subscribe{}).Where("1 = 1").Update("node_group_ids", "[]").Error + if err != nil { + l.Errorw("Failed to clear subscribes' node_group_ids", logger.Field("error", err.Error())) + return err + } + l.Infow("Successfully cleared all subscribes' node_group_ids") + + // 3. 
Clear node_group_ids for all nodes + err = l.svcCtx.DB.Model(&node.Node{}).Where("1 = 1").Update("node_group_ids", "[]").Error + if err != nil { + l.Errorw("Failed to clear nodes' node_group_ids", logger.Field("error", err.Error())) + return err + } + l.Infow("Successfully cleared all nodes' node_group_ids") + + // 4. Clear group history + err = l.svcCtx.DB.Where("1 = 1").Delete(&group.GroupHistory{}).Error + if err != nil { + l.Errorw("Failed to clear group history", logger.Field("error", err.Error())) + // Non-critical error, continue anyway + } else { + l.Infow("Successfully cleared group history") + } + + // 7. Clear group history details + err = l.svcCtx.DB.Where("1 = 1").Delete(&group.GroupHistoryDetail{}).Error + if err != nil { + l.Errorw("Failed to clear group history details", logger.Field("error", err.Error())) + // Non-critical error, continue anyway + } else { + l.Infow("Successfully cleared group history details") + } + + // 5. Delete all group config settings + err = l.svcCtx.DB.Where("`category` = ?", "group").Delete(&system.System{}).Error + if err != nil { + l.Errorw("Failed to delete group config", logger.Field("error", err.Error())) + return err + } + l.Infow("Successfully deleted all group config settings") + + l.Infow("Group reset completed successfully") + return nil +} diff --git a/internal/logic/admin/group/updateGroupConfigLogic.go b/internal/logic/admin/group/updateGroupConfigLogic.go new file mode 100644 index 0000000..0980373 --- /dev/null +++ b/internal/logic/admin/group/updateGroupConfigLogic.go @@ -0,0 +1,188 @@ +package group + +import ( + "context" + "encoding/json" + + "github.com/perfect-panel/server/internal/model/system" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type UpdateGroupConfigLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// 
Update group config +func NewUpdateGroupConfigLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateGroupConfigLogic { + return &UpdateGroupConfigLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *UpdateGroupConfigLogic) UpdateGroupConfig(req *types.UpdateGroupConfigRequest) error { + // 验证 mode 是否为合法值 + if req.Mode != "" { + if req.Mode != "average" && req.Mode != "subscribe" && req.Mode != "traffic" { + return errors.New("invalid mode, must be one of: average, subscribe, traffic") + } + } + + // 使用 GORM Transaction 更新配置 + err := l.svcCtx.DB.Transaction(func(tx *gorm.DB) error { + // 更新 enabled 配置(使用 Upsert 逻辑) + enabledValue := "false" + if req.Enabled { + enabledValue = "true" + } + result := tx.Model(&system.System{}). + Where("`category` = 'group' and `key` = ?", "enabled"). + Update("value", enabledValue) + if result.Error != nil { + l.Errorw("failed to update group enabled config", logger.Field("error", result.Error.Error())) + return result.Error + } + // 如果没有更新任何行,说明记录不存在,需要插入 + if result.RowsAffected == 0 { + if err := tx.Create(&system.System{ + Category: "group", + Key: "enabled", + Value: enabledValue, + Desc: "Group Feature Enabled", + }).Error; err != nil { + l.Errorw("failed to create group enabled config", logger.Field("error", err.Error())) + return err + } + } + + // 更新 mode 配置(使用 Upsert 逻辑) + if req.Mode != "" { + result := tx.Model(&system.System{}). + Where("`category` = 'group' and `key` = ?", "mode"). 
+ Update("value", req.Mode) + if result.Error != nil { + l.Errorw("failed to update group mode config", logger.Field("error", result.Error.Error())) + return result.Error + } + // 如果没有更新任何行,说明记录不存在,需要插入 + if result.RowsAffected == 0 { + if err := tx.Create(&system.System{ + Category: "group", + Key: "mode", + Value: req.Mode, + Desc: "Group Mode", + }).Error; err != nil { + l.Errorw("failed to create group mode config", logger.Field("error", err.Error())) + return err + } + } + } + + // 更新 JSON 配置 + if req.Config != nil { + // 更新 average_config + if averageConfig, ok := req.Config["average_config"]; ok { + jsonBytes, err := json.Marshal(averageConfig) + if err != nil { + l.Errorw("failed to marshal average_config", logger.Field("error", err.Error())) + return errors.Wrap(err, "failed to marshal average_config") + } + // 使用 Upsert 逻辑:先尝试 UPDATE,如果不存在则 INSERT + result := tx.Model(&system.System{}). + Where("`category` = 'group' and `key` = ?", "average_config"). + Update("value", string(jsonBytes)) + if result.Error != nil { + l.Errorw("failed to update group average_config", logger.Field("error", result.Error.Error())) + return result.Error + } + // 如果没有更新任何行,说明记录不存在,需要插入 + if result.RowsAffected == 0 { + if err := tx.Create(&system.System{ + Category: "group", + Key: "average_config", + Value: string(jsonBytes), + Desc: "Average Group Config", + }).Error; err != nil { + l.Errorw("failed to create group average_config", logger.Field("error", err.Error())) + return err + } + } + } + + // 更新 subscribe_config + if subscribeConfig, ok := req.Config["subscribe_config"]; ok { + jsonBytes, err := json.Marshal(subscribeConfig) + if err != nil { + l.Errorw("failed to marshal subscribe_config", logger.Field("error", err.Error())) + return errors.Wrap(err, "failed to marshal subscribe_config") + } + // 使用 Upsert 逻辑:先尝试 UPDATE,如果不存在则 INSERT + result := tx.Model(&system.System{}). + Where("`category` = 'group' and `key` = ?", "subscribe_config"). 
+ Update("value", string(jsonBytes)) + if result.Error != nil { + l.Errorw("failed to update group subscribe_config", logger.Field("error", result.Error.Error())) + return result.Error + } + // 如果没有更新任何行,说明记录不存在,需要插入 + if result.RowsAffected == 0 { + if err := tx.Create(&system.System{ + Category: "group", + Key: "subscribe_config", + Value: string(jsonBytes), + Desc: "Subscribe Group Config", + }).Error; err != nil { + l.Errorw("failed to create group subscribe_config", logger.Field("error", err.Error())) + return err + } + } + } + + // 更新 traffic_config + if trafficConfig, ok := req.Config["traffic_config"]; ok { + jsonBytes, err := json.Marshal(trafficConfig) + if err != nil { + l.Errorw("failed to marshal traffic_config", logger.Field("error", err.Error())) + return errors.Wrap(err, "failed to marshal traffic_config") + } + // 使用 Upsert 逻辑:先尝试 UPDATE,如果不存在则 INSERT + result := tx.Model(&system.System{}). + Where("`category` = 'group' and `key` = ?", "traffic_config"). + Update("value", string(jsonBytes)) + if result.Error != nil { + l.Errorw("failed to update group traffic_config", logger.Field("error", result.Error.Error())) + return result.Error + } + // 如果没有更新任何行,说明记录不存在,需要插入 + if result.RowsAffected == 0 { + if err := tx.Create(&system.System{ + Category: "group", + Key: "traffic_config", + Value: string(jsonBytes), + Desc: "Traffic Group Config", + }).Error; err != nil { + l.Errorw("failed to create group traffic_config", logger.Field("error", err.Error())) + return err + } + } + } + } + + return nil + }) + + if err != nil { + l.Errorw("failed to update group config", logger.Field("error", err.Error())) + return err + } + + l.Infof("group config updated successfully: enabled=%v, mode=%s", req.Enabled, req.Mode) + return nil +} diff --git a/internal/logic/admin/group/updateNodeGroupLogic.go b/internal/logic/admin/group/updateNodeGroupLogic.go new file mode 100644 index 0000000..b7d6fa4 --- /dev/null +++ b/internal/logic/admin/group/updateNodeGroupLogic.go @@ 
-0,0 +1,185 @@ +package group + +import ( + "context" + "errors" + "time" + + "github.com/perfect-panel/server/internal/model/group" + "github.com/perfect-panel/server/internal/model/subscribe" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + "gorm.io/gorm" +) + +type UpdateNodeGroupLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +func NewUpdateNodeGroupLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateNodeGroupLogic { + return &UpdateNodeGroupLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *UpdateNodeGroupLogic) UpdateNodeGroup(req *types.UpdateNodeGroupRequest) error { + // 检查节点组是否存在 + var nodeGroup group.NodeGroup + if err := l.svcCtx.DB.Where("id = ?", req.Id).First(&nodeGroup).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return errors.New("node group not found") + } + logger.Errorf("failed to find node group: %v", err) + return err + } + + // 验证:系统中只能有一个过期节点组 + if req.IsExpiredGroup != nil && *req.IsExpiredGroup { + var count int64 + err := l.svcCtx.DB.Model(&group.NodeGroup{}). + Where("is_expired_group = ? AND id != ?", true, req.Id). + Count(&count).Error + if err != nil { + logger.Errorf("failed to check expired group count: %v", err) + return err + } + if count > 0 { + return errors.New("system already has an expired node group, cannot create multiple") + } + + // 验证:被订阅商品设置为默认节点组的不能设置为过期节点组 + var subscribeCount int64 + err = l.svcCtx.DB.Model(&subscribe.Subscribe{}). + Where("node_group_id = ?", req.Id). 
+ Count(&subscribeCount).Error + if err != nil { + logger.Errorf("failed to check subscribe usage: %v", err) + return err + } + if subscribeCount > 0 { + return errors.New("this node group is used as default node group in subscription products, cannot set as expired group") + } + } + + // 构建更新数据 + updates := map[string]interface{}{ + "updated_at": time.Now(), + } + if req.Name != "" { + updates["name"] = req.Name + } + if req.Description != "" { + updates["description"] = req.Description + } + if req.Sort != 0 { + updates["sort"] = req.Sort + } + if req.ForCalculation != nil { + updates["for_calculation"] = *req.ForCalculation + } + if req.IsExpiredGroup != nil { + updates["is_expired_group"] = *req.IsExpiredGroup + // 过期节点组不参与分组计算 + if *req.IsExpiredGroup { + updates["for_calculation"] = false + } + } + if req.ExpiredDaysLimit != nil { + updates["expired_days_limit"] = *req.ExpiredDaysLimit + } + if req.MaxTrafficGBExpired != nil { + updates["max_traffic_gb_expired"] = *req.MaxTrafficGBExpired + } + if req.SpeedLimit != nil { + updates["speed_limit"] = *req.SpeedLimit + } + + // 获取新的流量区间值 + newMinTraffic := nodeGroup.MinTrafficGB + newMaxTraffic := nodeGroup.MaxTrafficGB + if req.MinTrafficGB != nil { + newMinTraffic = req.MinTrafficGB + updates["min_traffic_gb"] = *req.MinTrafficGB + } + if req.MaxTrafficGB != nil { + newMaxTraffic = req.MaxTrafficGB + updates["max_traffic_gb"] = *req.MaxTrafficGB + } + + // 校验流量区间 + if err := l.validateTrafficRange(int(req.Id), newMinTraffic, newMaxTraffic); err != nil { + return err + } + + // 执行更新 + if err := l.svcCtx.DB.Model(&nodeGroup).Updates(updates).Error; err != nil { + logger.Errorf("failed to update node group: %v", err) + return err + } + + logger.Infof("updated node group: id=%d", req.Id) + return nil +} + +// validateTrafficRange 校验流量区间:不能重叠、不能留空档、最小值不能大于最大值 +func (l *UpdateNodeGroupLogic) validateTrafficRange(currentNodeGroupId int, newMin, newMax *int64) error { + // 处理指针值 + minVal := int64(0) + maxVal := 
int64(0) + if newMin != nil { + minVal = *newMin + } + if newMax != nil { + maxVal = *newMax + } + + // 检查最小值是否大于最大值 + if minVal > maxVal { + return errors.New("minimum traffic cannot exceed maximum traffic") + } + + // 如果两个值都为0,表示不参与流量分组,不需要校验 + if minVal == 0 && maxVal == 0 { + return nil + } + + // 查询所有其他设置了流量区间的节点组 + var otherGroups []group.NodeGroup + if err := l.svcCtx.DB. + Where("id != ?", currentNodeGroupId). + Where("(min_traffic_gb > 0 OR max_traffic_gb > 0)"). + Find(&otherGroups).Error; err != nil { + logger.Errorf("failed to query other node groups: %v", err) + return err + } + + // 检查是否有重叠 + for _, other := range otherGroups { + otherMin := int64(0) + otherMax := int64(0) + if other.MinTrafficGB != nil { + otherMin = *other.MinTrafficGB + } + if other.MaxTrafficGB != nil { + otherMax = *other.MaxTrafficGB + } + + // 如果对方也没设置区间,跳过 + if otherMin == 0 && otherMax == 0 { + continue + } + + // 检查是否有重叠: 如果两个区间相交,就是重叠 + // 不重叠的条件是: newMax <= otherMin OR newMin >= otherMax + if !(maxVal <= otherMin || minVal >= otherMax) { + return errors.New("traffic range overlaps with another node group") + } + } + + return nil +} diff --git a/internal/logic/admin/server/createNodeLogic.go b/internal/logic/admin/server/createNodeLogic.go index 78ce987..38044de 100644 --- a/internal/logic/admin/server/createNodeLogic.go +++ b/internal/logic/admin/server/createNodeLogic.go @@ -29,13 +29,14 @@ func NewCreateNodeLogic(ctx context.Context, svcCtx *svc.ServiceContext) *Create func (l *CreateNodeLogic) CreateNode(req *types.CreateNodeRequest) error { data := node.Node{ - Name: req.Name, - Tags: tool.StringSliceToString(req.Tags), - Enabled: req.Enabled, - Port: req.Port, - Address: req.Address, - ServerId: req.ServerId, - Protocol: req.Protocol, + Name: req.Name, + Tags: tool.StringSliceToString(req.Tags), + Enabled: req.Enabled, + Port: req.Port, + Address: req.Address, + ServerId: req.ServerId, + Protocol: req.Protocol, + NodeGroupIds: node.JSONInt64Slice(req.NodeGroupIds), } 
err := l.svcCtx.NodeModel.InsertNode(l.ctx, &data) if err != nil { diff --git a/internal/logic/admin/server/filterNodeListLogic.go b/internal/logic/admin/server/filterNodeListLogic.go index 2e41cec..47f8574 100644 --- a/internal/logic/admin/server/filterNodeListLogic.go +++ b/internal/logic/admin/server/filterNodeListLogic.go @@ -29,10 +29,17 @@ func NewFilterNodeListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *Fi } func (l *FilterNodeListLogic) FilterNodeList(req *types.FilterNodeListRequest) (resp *types.FilterNodeListResponse, err error) { + // Convert NodeGroupId to []int64 for model + var nodeGroupIds []int64 + if req.NodeGroupId != nil { + nodeGroupIds = []int64{*req.NodeGroupId} + } + total, data, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ - Page: req.Page, - Size: req.Size, - Search: req.Search, + Page: req.Page, + Size: req.Size, + Search: req.Search, + NodeGroupIds: nodeGroupIds, }) if err != nil { @@ -43,17 +50,18 @@ func (l *FilterNodeListLogic) FilterNodeList(req *types.FilterNodeListRequest) ( list := make([]types.Node, 0) for _, datum := range data { list = append(list, types.Node{ - Id: datum.Id, - Name: datum.Name, - Tags: tool.RemoveDuplicateElements(strings.Split(datum.Tags, ",")...), - Port: datum.Port, - Address: datum.Address, - ServerId: datum.ServerId, - Protocol: datum.Protocol, - Enabled: datum.Enabled, - Sort: datum.Sort, - CreatedAt: datum.CreatedAt.UnixMilli(), - UpdatedAt: datum.UpdatedAt.UnixMilli(), + Id: datum.Id, + Name: datum.Name, + Tags: tool.RemoveDuplicateElements(strings.Split(datum.Tags, ",")...), + Port: datum.Port, + Address: datum.Address, + ServerId: datum.ServerId, + Protocol: datum.Protocol, + Enabled: datum.Enabled, + Sort: datum.Sort, + NodeGroupIds: []int64(datum.NodeGroupIds), + CreatedAt: datum.CreatedAt.UnixMilli(), + UpdatedAt: datum.UpdatedAt.UnixMilli(), }) } diff --git a/internal/logic/admin/server/updateNodeLogic.go b/internal/logic/admin/server/updateNodeLogic.go 
index 2af8a4d..3b0c291 100644 --- a/internal/logic/admin/server/updateNodeLogic.go +++ b/internal/logic/admin/server/updateNodeLogic.go @@ -40,6 +40,7 @@ func (l *UpdateNodeLogic) UpdateNode(req *types.UpdateNodeRequest) error { data.Address = req.Address data.Protocol = req.Protocol data.Enabled = req.Enabled + data.NodeGroupIds = node.JSONInt64Slice(req.NodeGroupIds) err = l.svcCtx.NodeModel.UpdateNode(l.ctx, data) if err != nil { l.Errorw("[UpdateNode] Update Database Error: ", logger.Field("error", err.Error())) diff --git a/internal/logic/admin/subscribe/createSubscribeLogic.go b/internal/logic/admin/subscribe/createSubscribeLogic.go index 411b026..290da21 100644 --- a/internal/logic/admin/subscribe/createSubscribeLogic.go +++ b/internal/logic/admin/subscribe/createSubscribeLogic.go @@ -34,6 +34,12 @@ func (l *CreateSubscribeLogic) CreateSubscribe(req *types.CreateSubscribeRequest val, _ := json.Marshal(req.Discount) discount = string(val) } + + trafficLimit := "" + if len(req.TrafficLimit) > 0 { + val, _ := json.Marshal(req.TrafficLimit) + trafficLimit = string(val) + } sub := &subscribe.Subscribe{ Id: 0, Name: req.Name, @@ -51,6 +57,9 @@ func (l *CreateSubscribeLogic) CreateSubscribe(req *types.CreateSubscribeRequest NewUserOnly: req.NewUserOnly, Nodes: tool.Int64SliceToString(req.Nodes), NodeTags: tool.StringSliceToString(req.NodeTags), + NodeGroupIds: subscribe.JSONInt64Slice(req.NodeGroupIds), + NodeGroupId: req.NodeGroupId, + TrafficLimit: trafficLimit, Show: req.Show, Sell: req.Sell, Sort: 0, diff --git a/internal/logic/admin/subscribe/getSubscribeDetailsLogic.go b/internal/logic/admin/subscribe/getSubscribeDetailsLogic.go index 6defdf1..fc29938 100644 --- a/internal/logic/admin/subscribe/getSubscribeDetailsLogic.go +++ b/internal/logic/admin/subscribe/getSubscribeDetailsLogic.go @@ -42,6 +42,12 @@ func (l *GetSubscribeDetailsLogic) GetSubscribeDetails(req *types.GetSubscribeDe l.Logger.Error("[GetSubscribeDetailsLogic] JSON unmarshal failed: ", 
logger.Field("error", err.Error()), logger.Field("discount", sub.Discount)) } } + if sub.TrafficLimit != "" { + err = json.Unmarshal([]byte(sub.TrafficLimit), &resp.TrafficLimit) + if err != nil { + l.Logger.Error("[GetSubscribeDetailsLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("traffic_limit", sub.TrafficLimit)) + } + } resp.Nodes = tool.StringToInt64Slice(sub.Nodes) resp.NodeTags = strings.Split(sub.NodeTags, ",") return resp, nil diff --git a/internal/logic/admin/subscribe/getSubscribeListLogic.go b/internal/logic/admin/subscribe/getSubscribeListLogic.go index e8c7866..6cf6ba6 100644 --- a/internal/logic/admin/subscribe/getSubscribeListLogic.go +++ b/internal/logic/admin/subscribe/getSubscribeListLogic.go @@ -30,12 +30,20 @@ func NewGetSubscribeListLogic(ctx context.Context, svcCtx *svc.ServiceContext) * } func (l *GetSubscribeListLogic) GetSubscribeList(req *types.GetSubscribeListRequest) (resp *types.GetSubscribeListResponse, err error) { - total, list, err := l.svcCtx.SubscribeModel.FilterList(l.ctx, &subscribe.FilterParams{ + // Build filter params + filterParams := &subscribe.FilterParams{ Page: int(req.Page), Size: int(req.Size), Language: req.Language, Search: req.Search, - }) + } + + // Add NodeGroupId filter if provided + if req.NodeGroupId > 0 { + filterParams.NodeGroupId = &req.NodeGroupId + } + + total, list, err := l.svcCtx.SubscribeModel.FilterList(l.ctx, filterParams) if err != nil { l.Logger.Error("[GetSubscribeListLogic] get subscribe list failed: ", logger.Field("error", err.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "get subscribe list failed: %v", err.Error()) @@ -54,8 +62,22 @@ func (l *GetSubscribeListLogic) GetSubscribeList(req *types.GetSubscribeListRequ l.Logger.Error("[GetSubscribeListLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("discount", item.Discount)) } } + if item.TrafficLimit != "" { + err = 
json.Unmarshal([]byte(item.TrafficLimit), &sub.TrafficLimit) + if err != nil { + l.Logger.Error("[GetSubscribeListLogic] JSON unmarshal failed: ", logger.Field("error", err.Error()), logger.Field("traffic_limit", item.TrafficLimit)) + } + } sub.Nodes = tool.StringToInt64Slice(item.Nodes) sub.NodeTags = strings.Split(item.NodeTags, ",") + // Handle NodeGroupIds - convert from JSONInt64Slice to []int64 + if item.NodeGroupIds != nil { + sub.NodeGroupIds = []int64(item.NodeGroupIds) + } else { + sub.NodeGroupIds = []int64{} + } + // NodeGroupId is already int64, should be copied by DeepCopy + sub.NodeGroupId = item.NodeGroupId resultList = append(resultList, sub) } diff --git a/internal/logic/admin/subscribe/updateSubscribeLogic.go b/internal/logic/admin/subscribe/updateSubscribeLogic.go index 123d5e0..8e4af98 100644 --- a/internal/logic/admin/subscribe/updateSubscribeLogic.go +++ b/internal/logic/admin/subscribe/updateSubscribeLogic.go @@ -42,6 +42,12 @@ func (l *UpdateSubscribeLogic) UpdateSubscribe(req *types.UpdateSubscribeRequest val, _ := json.Marshal(req.Discount) discount = string(val) } + + trafficLimit := "" + if len(req.TrafficLimit) > 0 { + val, _ := json.Marshal(req.TrafficLimit) + trafficLimit = string(val) + } sub := &subscribe.Subscribe{ Id: req.Id, Name: req.Name, @@ -59,6 +65,9 @@ func (l *UpdateSubscribeLogic) UpdateSubscribe(req *types.UpdateSubscribeRequest NewUserOnly: req.NewUserOnly, Nodes: tool.Int64SliceToString(req.Nodes), NodeTags: tool.StringSliceToString(req.NodeTags), + NodeGroupIds: subscribe.JSONInt64Slice(req.NodeGroupIds), + NodeGroupId: req.NodeGroupId, + TrafficLimit: trafficLimit, Show: req.Show, Sell: req.Sell, Sort: req.Sort, diff --git a/internal/logic/admin/user/createUserSubscribeLogic.go b/internal/logic/admin/user/createUserSubscribeLogic.go index 08876f8..e294cb5 100644 --- a/internal/logic/admin/user/createUserSubscribeLogic.go +++ b/internal/logic/admin/user/createUserSubscribeLogic.go @@ -6,6 +6,7 @@ import ( "time" 
"github.com/google/uuid" + "github.com/perfect-panel/server/internal/logic/admin/group" "github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" @@ -64,6 +65,7 @@ func (l *CreateUserSubscribeLogic) CreateUserSubscribe(req *types.CreateUserSubs Upload: 0, Token: uuidx.SubscribeToken(fmt.Sprintf("adminCreate:%d", time.Now().UnixMilli())), UUID: uuid.New().String(), + NodeGroupId: sub.NodeGroupId, Status: 1, } if err = l.svcCtx.UserModel.InsertSubscribe(l.ctx, &userSub); err != nil { @@ -71,6 +73,60 @@ func (l *CreateUserSubscribeLogic) CreateUserSubscribe(req *types.CreateUserSubs return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseInsertError), "InsertSubscribe error: %v", err.Error()) } + // Trigger user group recalculation (runs in background) + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Check if group management is enabled + var groupEnabled string + err := l.svcCtx.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). + Select("value"). + Scan(&groupEnabled).Error + if err != nil || groupEnabled != "true" && groupEnabled != "1" { + l.Debugf("Group management not enabled, skipping recalculation") + return + } + + // Get the configured grouping mode + var groupMode string + err = l.svcCtx.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "mode"). + Select("value"). 
+ Scan(&groupMode).Error + if err != nil { + l.Errorw("Failed to get group mode", logger.Field("error", err.Error())) + return + } + + // Validate group mode + if groupMode != "average" && groupMode != "subscribe" && groupMode != "traffic" { + l.Debugf("Invalid group mode (current: %s), skipping", groupMode) + return + } + + // Trigger group recalculation with the configured mode + logic := group.NewRecalculateGroupLogic(ctx, l.svcCtx) + req := &types.RecalculateGroupRequest{ + Mode: groupMode, + } + + if err := logic.RecalculateGroup(req); err != nil { + l.Errorw("Failed to recalculate user group", + logger.Field("user_id", userInfo.Id), + logger.Field("error", err.Error()), + ) + return + } + + l.Infow("Successfully recalculated user group after admin created subscription", + logger.Field("user_id", userInfo.Id), + logger.Field("subscribe_id", userSub.Id), + logger.Field("mode", groupMode), + ) + }() + err = l.svcCtx.UserModel.UpdateUserCache(l.ctx, userInfo) if err != nil { l.Errorw("UpdateUserCache error", logger.Field("error", err.Error())) @@ -81,5 +137,6 @@ func (l *CreateUserSubscribeLogic) CreateUserSubscribe(req *types.CreateUserSubs if err != nil { logger.Errorw("ClearSubscribe error", logger.Field("error", err.Error())) } + return nil } diff --git a/internal/logic/admin/user/deleteUserLogic.go b/internal/logic/admin/user/deleteUserLogic.go index 5253d09..4910e93 100644 --- a/internal/logic/admin/user/deleteUserLogic.go +++ b/internal/logic/admin/user/deleteUserLogic.go @@ -2,39 +2,257 @@ package user import ( "context" + "fmt" "os" "strings" + usermodel "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/config" + "github.com/perfect-panel/server/internal/model/iap/apple" + "github.com/perfect-panel/server/internal/model/log" + "github.com/perfect-panel/server/internal/model/logmessage" + "github.com/perfect-panel/server/internal/model/order" + "github.com/perfect-panel/server/internal/model/ticket" + 
"github.com/perfect-panel/server/internal/model/traffic" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" - "github.com/perfect-panel/server/pkg/logger" + pkglogger "github.com/perfect-panel/server/pkg/logger" "github.com/perfect-panel/server/pkg/xerr" "github.com/pkg/errors" + "gorm.io/gorm" ) type DeleteUserLogic struct { ctx context.Context svcCtx *svc.ServiceContext - logger.Logger + pkglogger.Logger } func NewDeleteUserLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteUserLogic { return &DeleteUserLogic{ ctx: ctx, svcCtx: svcCtx, - Logger: logger.WithContext(ctx), + Logger: pkglogger.WithContext(ctx), } } func (l *DeleteUserLogic) DeleteUser(req *types.GetDetailRequest) error { isDemo := strings.ToLower(os.Getenv("PPANEL_MODE")) == "demo" - if req.Id == 2 && isDemo { return errors.Wrapf(xerr.NewErrCodeMsg(503, "Demo mode does not allow deletion of the admin user"), "delete user failed: cannot delete admin user in demo mode") } - err := l.svcCtx.UserModel.Delete(l.ctx, req.Id) + + return l.purgeUser(req.Id) +} + +// purgeUser 硬删除用户及其所有关联数据,无孤儿数据残留。 +// 删除顺序:先删子表(引用 user_id),再删主表(user)。 +func (l *DeleteUserLogic) purgeUser(userID int64) error { + // 1. 事务前:收集需要清缓存的数据 + userInfo, err := l.svcCtx.UserModel.FindOne(l.ctx, userID) if err != nil { - return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseDeletedError), "delete user error: %v", err.Error()) + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil + } + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find user failed: %v", err) } + + authMethods, _ := l.svcCtx.UserModel.FindUserAuthMethods(l.ctx, userID) + subscribes, _ := l.svcCtx.UserModel.QueryUserSubscribe(l.ctx, userID) + + // 2. 查 ticket id 列表(ticket_follow 通过 ticket_id 关联,需要先查再删) + var ticketIDs []int64 + l.svcCtx.DB.WithContext(l.ctx).Model(&ticket.Ticket{}). + Where("user_id = ?", userID).Pluck("id", &ticketIDs) + + // 3. 
查家庭关系(需要处理家庭解散) + var familyMember usermodel.UserFamilyMember + isFamilyOwner := false + var familyID int64 + if err := l.svcCtx.DB.WithContext(l.ctx). + Model(&usermodel.UserFamilyMember{}). + Where("user_id = ?", userID). + First(&familyMember).Error; err == nil { + familyID = familyMember.FamilyId + var family usermodel.UserFamily + if err2 := l.svcCtx.DB.WithContext(l.ctx). + Where("id = ?", familyID).First(&family).Error; err2 == nil { + isFamilyOwner = (family.OwnerUserId == userID) + } + } + + // 4. 事务内:按顺序删除所有关联表,最后删 user + err = l.svcCtx.DB.WithContext(l.ctx).Transaction(func(tx *gorm.DB) error { + // 4a. 家庭处理 + if familyID > 0 { + if isFamilyOwner { + // 家主:解散整个家庭(删所有成员记录 + 删家庭) + if e := tx.Unscoped().Where("family_id = ?", familyID). + Delete(&usermodel.UserFamilyMember{}).Error; e != nil { + return e + } + if e := tx.Unscoped().Where("id = ?", familyID). + Delete(&usermodel.UserFamily{}).Error; e != nil { + return e + } + } else { + // 成员:只删自己的成员记录 + if e := tx.Unscoped().Where("user_id = ? AND family_id = ?", userID, familyID). + Delete(&usermodel.UserFamilyMember{}).Error; e != nil { + return e + } + } + } + + // 4b. 用户认证方式 + if e := tx.Unscoped().Where("user_id = ?", userID). + Delete(&usermodel.AuthMethods{}).Error; e != nil { + return e + } + + // 4c. 订阅 + if e := tx.Unscoped().Where("user_id = ?", userID). + Delete(&usermodel.Subscribe{}).Error; e != nil { + return e + } + + // 4d. 设备 + 设备在线记录 + if e := tx.Where("user_id = ?", userID). + Delete(&usermodel.Device{}).Error; e != nil { + return e + } + if e := tx.Where("user_id = ?", userID). + Delete(&usermodel.DeviceOnlineRecord{}).Error; e != nil { + return e + } + + // 4e. 提现记录 + if e := tx.Where("user_id = ?", userID). + Delete(&usermodel.Withdrawal{}).Error; e != nil { + return e + } + + // 4f. 订单 + if e := tx.Where("user_id = ?", userID). + Delete(&order.Order{}).Error; e != nil { + return e + } + + // 4g. 流量日志 + if e := tx.Where("user_id = ?", userID). 
+ Delete(&traffic.TrafficLog{}).Error; e != nil { + return e + } + + // 4h. 系统日志(object_id = userID) + if e := tx.Where("object_id = ?", userID). + Delete(&log.SystemLog{}).Error; e != nil { + return e + } + + // 4i. 工单 follow + 工单 + if len(ticketIDs) > 0 { + if e := tx.Where("ticket_id IN ?", ticketIDs). + Delete(&ticket.Follow{}).Error; e != nil { + return e + } + } + if e := tx.Where("user_id = ?", userID). + Delete(&ticket.Ticket{}).Error; e != nil { + return e + } + + // 4j. Apple IAP 交易记录 + if e := tx.Where("user_id = ?", userID). + Delete(&apple.Transaction{}).Error; e != nil { + return e + } + + // 4k. 日志消息 + if e := tx.Where("user_id = ?", userID). + Delete(&logmessage.LogMessage{}).Error; e != nil { + return e + } + + // 4l. 最后删除 user(Unscoped = 物理删除) + if e := tx.Unscoped().Where("id = ?", userID). + Delete(&usermodel.User{}).Error; e != nil { + return e + } + + return nil + }) + if err != nil { + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseDeletedError), "purge user %d failed: %v", userID, err) + } + + // 5. 
事务后:清缓存 + 踢设备 + l.cleanupAfterDelete(userID, userInfo, authMethods, subscribes) + return nil } + +func (l *DeleteUserLogic) cleanupAfterDelete( + userID int64, + userInfo *usermodel.User, + authMethods []*usermodel.AuthMethods, + subscribes []*usermodel.SubscribeDetails, +) { + // 踢设备 + var devices []usermodel.Device + l.svcCtx.DB.WithContext(l.ctx).Where("user_id = ?", userID).Find(&devices) + for _, d := range devices { + l.svcCtx.DeviceManager.KickDevice(d.UserId, d.Identifier) + } + + // 清 session + l.clearAllSessions(userID) + + // 清 email 缓存 + var emailKeys []string + for _, am := range authMethods { + if am.AuthType == "email" && am.AuthIdentifier != "" { + emailKeys = append(emailKeys, fmt.Sprintf("cache:user:email:%s", am.AuthIdentifier)) + } + } + if len(emailKeys) > 0 { + if e := l.svcCtx.Redis.Del(l.ctx, emailKeys...).Err(); e != nil { + l.Errorw("clear email cache failed", pkglogger.Field("user_id", userID), pkglogger.Field("error", e.Error())) + } + } + + // 清 user 缓存 + if userInfo != nil { + if e := l.svcCtx.UserModel.ClearUserCache(l.ctx, userInfo); e != nil { + l.Errorw("clear user cache failed", pkglogger.Field("user_id", userID), pkglogger.Field("error", e.Error())) + } + } + + // 清订阅缓存 + subModels := make([]*usermodel.Subscribe, 0, len(subscribes)+1) + subModels = append(subModels, &usermodel.Subscribe{UserId: userID}) + for _, s := range subscribes { + subModels = append(subModels, &usermodel.Subscribe{ + Id: s.Id, UserId: s.UserId, SubscribeId: s.SubscribeId, Token: s.Token, + }) + } + if e := l.svcCtx.UserModel.ClearSubscribeCache(l.ctx, subModels...); e != nil { + l.Errorw("clear subscribe cache failed", pkglogger.Field("user_id", userID), pkglogger.Field("error", e.Error())) + } +} + +// clearAllSessions 清理用户所有 session +func (l *DeleteUserLogic) clearAllSessions(userID int64) { + sessionsKey := fmt.Sprintf("%s%d", config.UserSessionsKeyPrefix, userID) + sessions, _ := l.svcCtx.Redis.ZRange(l.ctx, sessionsKey, 0, -1).Result() + pipe := 
l.svcCtx.Redis.TxPipeline() + for _, sid := range sessions { + pipe.Del(l.ctx, fmt.Sprintf("%s:%s", config.SessionIdKey, sid)) + pipe.Del(l.ctx, fmt.Sprintf("%s:detail:%s", config.SessionIdKey, sid)) + pipe.ZRem(l.ctx, sessionsKey, sid) + } + pipe.Del(l.ctx, sessionsKey) + if _, e := pipe.Exec(l.ctx); e != nil { + l.Errorw("clear sessions failed", pkglogger.Field("user_id", userID), pkglogger.Field("error", e.Error())) + } +} diff --git a/internal/logic/admin/user/familyCommon.go b/internal/logic/admin/user/familyCommon.go index 9e34456..f34fc5a 100644 --- a/internal/logic/admin/user/familyCommon.go +++ b/internal/logic/admin/user/familyCommon.go @@ -53,8 +53,13 @@ func normalizeFamilyStatusInput(status string) (uint8, bool) { } } -func findUserIdentifiers(ctx context.Context, db *gorm.DB, userIDs []int64) (map[int64]string, error) { - identifierMap := make(map[int64]string) +type identifierInfo struct { + Identifier string + AuthType string +} + +func findUserIdentifiers(ctx context.Context, db *gorm.DB, userIDs []int64) (map[int64]identifierInfo, error) { + identifierMap := make(map[int64]identifierInfo) if len(userIDs) == 0 { return identifierMap, nil } @@ -92,8 +97,31 @@ func findUserIdentifiers(ctx context.Context, db *gorm.DB, userIDs []int64) (map continue } selectedPriority[row.UserId] = currentPriority - identifierMap[row.UserId] = row.AuthIdentifier + identifierMap[row.UserId] = identifierInfo{ + Identifier: row.AuthIdentifier, + AuthType: row.AuthType, + } } return identifierMap, nil } + +func parseDeviceType(userAgent string) string { + ua := strings.ToLower(userAgent) + switch { + case strings.Contains(ua, "iphone"): + return "iPhone" + case strings.Contains(ua, "ipad"): + return "iPad" + case strings.Contains(ua, "android"): + return "Android" + case strings.Contains(ua, "windows"): + return "Windows" + case strings.Contains(ua, "macintosh"), strings.Contains(ua, "mac os"): + return "Mac" + case strings.Contains(ua, "linux"): + return "Linux" + 
default: + return "" + } +} diff --git a/internal/logic/admin/user/getFamilyDetailLogic.go b/internal/logic/admin/user/getFamilyDetailLogic.go index b3ed335..6f71369 100644 --- a/internal/logic/admin/user/getFamilyDetailLogic.go +++ b/internal/logic/admin/user/getFamilyDetailLogic.go @@ -63,18 +63,22 @@ func (l *GetFamilyDetailLogic) GetFamilyDetail(req *types.GetFamilyDetailRequest return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query family member identifiers failed") } - // 查出所有成员的设备,构建 userId → deviceNo 映射 + // 查出所有成员的设备,构建 userId → deviceNo 映射 + deviceType 映射 deviceNoMap := make(map[int64]string) + deviceTypeMap := make(map[int64]string) devices, _, _ := l.svcCtx.UserModel.QueryDeviceListByUserIds(l.ctx, userIDs) for _, d := range devices { if _, exists := deviceNoMap[d.UserId]; !exists { deviceNoMap[d.UserId] = tool.DeviceIdToHash(d.Id) + deviceTypeMap[d.UserId] = parseDeviceType(d.UserAgent) } } memberItems := make([]types.FamilyMemberItem, 0, len(members)) for _, member := range members { - identifier := identifierMap[member.UserId] + info := identifierMap[member.UserId] + identifier := info.Identifier + authType := info.AuthType if identifier == "" { identifier = strconv.FormatInt(member.UserId, 10) } @@ -82,7 +86,9 @@ func (l *GetFamilyDetailLogic) GetFamilyDetail(req *types.GetFamilyDetailRequest memberItem := types.FamilyMemberItem{ UserId: member.UserId, Identifier: identifier, - DeviceNo : deviceNoMap[member.UserId], + AuthType: authType, + DeviceNo: deviceNoMap[member.UserId], + DeviceType: deviceTypeMap[member.UserId], Role: member.Role, RoleName: mapFamilyRoleName(member.Role), Status: member.Status, @@ -96,7 +102,9 @@ func (l *GetFamilyDetailLogic) GetFamilyDetail(req *types.GetFamilyDetailRequest memberItems = append(memberItems, memberItem) } - ownerIdentifier := identifierMap[family.OwnerUserId] + ownerInfo := identifierMap[family.OwnerUserId] + ownerIdentifier := ownerInfo.Identifier + ownerAuthType := ownerInfo.AuthType if 
ownerIdentifier == "" { ownerIdentifier = strconv.FormatInt(family.OwnerUserId, 10) } @@ -106,6 +114,7 @@ func (l *GetFamilyDetailLogic) GetFamilyDetail(req *types.GetFamilyDetailRequest FamilyId: family.Id, OwnerUserId: family.OwnerUserId, OwnerIdentifier: ownerIdentifier, + OwnerAuthType: ownerAuthType, Status: mapFamilyStatus(family.Status), ActiveMemberCount: activeMemberCount, MaxMembers: family.MaxMembers, diff --git a/internal/logic/admin/user/getFamilyListLogic.go b/internal/logic/admin/user/getFamilyListLogic.go index 741edcb..89f394e 100644 --- a/internal/logic/admin/user/getFamilyListLogic.go +++ b/internal/logic/admin/user/getFamilyListLogic.go @@ -123,7 +123,9 @@ func (l *GetFamilyListLogic) GetFamilyList(req *types.GetFamilyListRequest) (*ty list := make([]types.FamilySummary, 0, len(families)) for _, family := range families { - ownerIdentifier := identifierMap[family.OwnerUserId] + ownerInfo := identifierMap[family.OwnerUserId] + ownerIdentifier := ownerInfo.Identifier + ownerAuthType := ownerInfo.AuthType if ownerIdentifier == "" { ownerIdentifier = strconv.FormatInt(family.OwnerUserId, 10) } @@ -132,6 +134,7 @@ func (l *GetFamilyListLogic) GetFamilyList(req *types.GetFamilyListRequest) (*ty FamilyId: family.Id, OwnerUserId: family.OwnerUserId, OwnerIdentifier: ownerIdentifier, + OwnerAuthType: ownerAuthType, Status: mapFamilyStatus(family.Status), ActiveMemberCount: countMap[family.Id], MaxMembers: family.MaxMembers, diff --git a/internal/logic/admin/user/getUserSubscribeByIdLogic.go b/internal/logic/admin/user/getUserSubscribeByIdLogic.go index 2f9af88..5ef44e7 100644 --- a/internal/logic/admin/user/getUserSubscribeByIdLogic.go +++ b/internal/logic/admin/user/getUserSubscribeByIdLogic.go @@ -3,9 +3,11 @@ package user import ( "context" + "github.com/perfect-panel/server/internal/model/group" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/logger" + 
"github.com/perfect-panel/server/pkg/speedlimit" "github.com/perfect-panel/server/pkg/tool" "github.com/perfect-panel/server/pkg/xerr" "github.com/pkg/errors" @@ -34,5 +36,24 @@ func (l *GetUserSubscribeByIdLogic) GetUserSubscribeById(req *types.GetUserSubsc } var subscribeDetails types.UserSubscribeDetail tool.DeepCopy(&subscribeDetails, sub) + + // 填充分组名 + if sub.NodeGroupId > 0 { + var ng group.NodeGroup + if err := l.svcCtx.DB.WithContext(l.ctx).First(&ng, sub.NodeGroupId).Error; err == nil { + subscribeDetails.NodeGroupName = ng.Name + } + } + + // Calculate speed limit status + if sub.Subscribe != nil && sub.Status == 1 { + result := speedlimit.Calculate(l.ctx, l.svcCtx.DB, sub.UserId, sub.Id, sub.Subscribe.SpeedLimit, sub.Subscribe.TrafficLimit) + subscribeDetails.EffectiveSpeed = result.EffectiveSpeed + subscribeDetails.IsThrottled = result.IsThrottled + subscribeDetails.ThrottleRule = result.ThrottleRule + subscribeDetails.ThrottleStart = result.ThrottleStart + subscribeDetails.ThrottleEnd = result.ThrottleEnd + } + return &subscribeDetails, nil } diff --git a/internal/logic/admin/user/getUserSubscribeLogic.go b/internal/logic/admin/user/getUserSubscribeLogic.go index cd1e733..c33040a 100644 --- a/internal/logic/admin/user/getUserSubscribeLogic.go +++ b/internal/logic/admin/user/getUserSubscribeLogic.go @@ -3,6 +3,7 @@ package user import ( "context" + "github.com/perfect-panel/server/internal/model/group" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/logger" @@ -38,10 +39,33 @@ func (l *GetUserSubscribeLogic) GetUserSubscribe(req *types.GetUserSubscribeList Total: int64(len(data)), } + // 收集所有 node_group_id,批量查分组名 + groupIdSet := make(map[int64]struct{}) + for _, item := range data { + if item.NodeGroupId > 0 { + groupIdSet[item.NodeGroupId] = struct{}{} + } + } + groupNames := make(map[int64]string) + if len(groupIdSet) > 0 { + ids := make([]int64, 0, len(groupIdSet)) 
+ for id := range groupIdSet { + ids = append(ids, id) + } + var groups []group.NodeGroup + if err := l.svcCtx.DB.WithContext(l.ctx).Where("id IN ?", ids).Find(&groups).Error; err == nil { + for _, g := range groups { + groupNames[g.Id] = g.Name + } + } + } + for _, item := range data { var sub types.UserSubscribe tool.DeepCopy(&sub, item) sub.Short, _ = tool.FixedUniqueString(item.Token, 8, "") + sub.NodeGroupId = item.NodeGroupId + sub.NodeGroupName = groupNames[item.NodeGroupId] resp.List = append(resp.List, sub) } return diff --git a/internal/logic/admin/user/updateUserSubscribeLogic.go b/internal/logic/admin/user/updateUserSubscribeLogic.go index d86bac3..6ec2267 100644 --- a/internal/logic/admin/user/updateUserSubscribeLogic.go +++ b/internal/logic/admin/user/updateUserSubscribeLogic.go @@ -53,6 +53,8 @@ func (l *UpdateUserSubscribeLogic) UpdateUserSubscribe(req *types.UpdateUserSubs Token: userSub.Token, UUID: userSub.UUID, Status: userSub.Status, + NodeGroupId: userSub.NodeGroupId, + GroupLocked: userSub.GroupLocked, }) if err != nil { @@ -81,5 +83,6 @@ func (l *UpdateUserSubscribeLogic) UpdateUserSubscribe(req *types.UpdateUserSubs l.Errorf("ClearServerAllCache error: %v", err.Error()) return errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "failed to clear server cache: %v", err.Error()) } + return nil } diff --git a/internal/logic/auth/admin/adminGenerateCaptchaLogic.go b/internal/logic/auth/admin/adminGenerateCaptchaLogic.go new file mode 100644 index 0000000..a45df89 --- /dev/null +++ b/internal/logic/auth/admin/adminGenerateCaptchaLogic.go @@ -0,0 +1,81 @@ +package admin + +import ( + "context" + + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" + "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/tool" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" +) + +type AdminGenerateCaptchaLogic struct { + 
logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Generate captcha +func NewAdminGenerateCaptchaLogic(ctx context.Context, svcCtx *svc.ServiceContext) *AdminGenerateCaptchaLogic { + return &AdminGenerateCaptchaLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *AdminGenerateCaptchaLogic) AdminGenerateCaptcha() (resp *types.GenerateCaptchaResponse, err error) { + resp = &types.GenerateCaptchaResponse{} + + // Get verify config from database + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[AdminGenerateCaptchaLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var config struct { + CaptchaType string `json:"captcha_type"` + TurnstileSiteKey string `json:"turnstile_site_key"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &config) + + resp.Type = config.CaptchaType + + // If captcha type is local, generate captcha image + if config.CaptchaType == "local" { + captchaService := captcha.NewService(captcha.Config{ + Type: captcha.CaptchaTypeLocal, + RedisClient: l.svcCtx.Redis, + }) + + id, image, err := captchaService.Generate(l.ctx) + if err != nil { + l.Logger.Error("[AdminGenerateCaptchaLogic] Generate captcha error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "Generate captcha error: %v", err.Error()) + } + + resp.Id = id + resp.Image = image + } else if config.CaptchaType == "turnstile" { + // For Turnstile, just return the site key + resp.Id = config.TurnstileSiteKey + } else if config.CaptchaType == "slider" { + // For slider, generate background and block images + sliderSvc := captcha.NewSliderService(l.svcCtx.Redis) + id, bgImage, blockImage, err := sliderSvc.GenerateSlider(l.ctx) + if err != nil { + 
l.Logger.Error("[AdminGenerateCaptchaLogic] Generate slider captcha error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "Generate slider captcha error: %v", err.Error()) + } + resp.Id = id + resp.Image = bgImage + resp.BlockImage = blockImage + } + + return resp, nil +} diff --git a/internal/logic/auth/admin/adminLoginLogic.go b/internal/logic/auth/admin/adminLoginLogic.go new file mode 100644 index 0000000..72aed52 --- /dev/null +++ b/internal/logic/auth/admin/adminLoginLogic.go @@ -0,0 +1,164 @@ +package admin + +import ( + "context" + "fmt" + "time" + + "github.com/perfect-panel/server/internal/config" + "github.com/perfect-panel/server/internal/logic/auth" + "github.com/perfect-panel/server/internal/model/log" + "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" + "github.com/perfect-panel/server/pkg/constant" + "github.com/perfect-panel/server/pkg/jwt" + "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/tool" + "github.com/perfect-panel/server/pkg/uuidx" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type AdminLoginLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Admin login +func NewAdminLoginLogic(ctx context.Context, svcCtx *svc.ServiceContext) *AdminLoginLogic { + return &AdminLoginLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *AdminLoginLogic) AdminLogin(req *types.UserLoginRequest) (resp *types.LoginResponse, err error) { + loginStatus := false + var userInfo *user.User + // Record login status + defer func(svcCtx *svc.ServiceContext) { + if userInfo != nil && userInfo.Id != 0 { + loginLog := log.Login{ + Method: "email", + LoginIP: req.IP, + UserAgent: req.UserAgent, + Success: loginStatus, + 
Timestamp: time.Now().UnixMilli(), + } + content, _ := loginLog.Marshal() + if err := l.svcCtx.LogModel.Insert(l.ctx, &log.SystemLog{ + Type: log.TypeLogin.Uint8(), + Date: time.Now().Format("2006-01-02"), + ObjectID: userInfo.Id, + Content: string(content), + }); err != nil { + l.Errorw("failed to insert login log", + logger.Field("user_id", userInfo.Id), + logger.Field("ip", req.IP), + logger.Field("error", err.Error()), + ) + } + } + }(l.svcCtx) + + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + + userInfo, err = l.svcCtx.UserModel.FindOneByEmail(l.ctx, req.Email) + + if userInfo.DeletedAt.Valid { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.UserNotExist), "user email deleted: %v", req.Email) + } + + if err != nil { + if errors.As(err, &gorm.ErrRecordNotFound) { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.UserNotExist), "user email not exist: %v", req.Email) + } + logger.WithContext(l.ctx).Error(err) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query user info failed: %v", err.Error()) + } + + // Check if user is admin + if userInfo.IsAdmin == nil || !*userInfo.IsAdmin { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.PermissionDenied), "user is not admin") + } + + // Verify password + if !tool.MultiPasswordVerify(userInfo.Algo, userInfo.Salt, req.Password, userInfo.Password) { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.UserPasswordError), "user password") + } + + // Bind device to user if identifier is provided + if req.Identifier != "" { + bindLogic := auth.NewBindDeviceLogic(l.ctx, l.svcCtx) + if err := bindLogic.BindDeviceToUser(req.Identifier, req.IP, req.UserAgent, userInfo.Id); err != nil { + l.Errorw("failed to bind device to user", + logger.Field("user_id", userInfo.Id), + logger.Field("identifier", req.Identifier), + logger.Field("error", err.Error()), + ) + // Don't fail login if device binding fails, just log the error + } + } + if l.ctx.Value(constant.CtxLoginType) != nil 
{ + req.LoginType = l.ctx.Value(constant.CtxLoginType).(string) + } + // Generate session id + sessionId := uuidx.NewUUID().String() + // Generate token + token, err := jwt.NewJwtToken( + l.svcCtx.Config.JwtAuth.AccessSecret, + time.Now().Unix(), + l.svcCtx.Config.JwtAuth.AccessExpire, + jwt.WithOption("UserId", userInfo.Id), + jwt.WithOption("SessionId", sessionId), + jwt.WithOption("identifier", req.Identifier), + jwt.WithOption("CtxLoginType", req.LoginType), + ) + if err != nil { + l.Logger.Error("[AdminLogin] token generate error", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "token generate error: %v", err.Error()) + } + sessionIdCacheKey := fmt.Sprintf("%v:%v", config.SessionIdKey, sessionId) + if err = l.svcCtx.Redis.Set(l.ctx, sessionIdCacheKey, userInfo.Id, time.Duration(l.svcCtx.Config.JwtAuth.AccessExpire)*time.Second).Err(); err != nil { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "set session id error: %v", err.Error()) + } + loginStatus = true + return &types.LoginResponse{ + Token: token, + }, nil +} + +func (l *AdminLoginLogic) verifyCaptcha(req *types.UserLoginRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[AdminLoginLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableAdminLoginCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: 
req.IP, + }) +} diff --git a/internal/logic/auth/admin/adminResetPasswordLogic.go b/internal/logic/auth/admin/adminResetPasswordLogic.go new file mode 100644 index 0000000..4ed13f1 --- /dev/null +++ b/internal/logic/auth/admin/adminResetPasswordLogic.go @@ -0,0 +1,192 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/perfect-panel/server/internal/config" + "github.com/perfect-panel/server/internal/logic/auth" + "github.com/perfect-panel/server/internal/model/log" + "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" + "github.com/perfect-panel/server/pkg/constant" + "github.com/perfect-panel/server/pkg/jwt" + "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/tool" + "github.com/perfect-panel/server/pkg/uuidx" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type AdminResetPasswordLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +type CacheKeyPayload struct { + Code string `json:"code"` +} + +// Admin reset password +func NewAdminResetPasswordLogic(ctx context.Context, svcCtx *svc.ServiceContext) *AdminResetPasswordLogic { + return &AdminResetPasswordLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *AdminResetPasswordLogic) AdminResetPassword(req *types.ResetPasswordRequest) (resp *types.LoginResponse, err error) { + var userInfo *user.User + loginStatus := false + + defer func() { + if userInfo != nil && userInfo.Id != 0 && loginStatus { + loginLog := log.Login{ + Method: "email", + LoginIP: req.IP, + UserAgent: req.UserAgent, + Success: loginStatus, + Timestamp: time.Now().UnixMilli(), + } + content, _ := loginLog.Marshal() + if err := l.svcCtx.LogModel.Insert(l.ctx, &log.SystemLog{ + Id: 0, + Type: 
log.TypeLogin.Uint8(), + Date: time.Now().Format("2006-01-02"), + ObjectID: userInfo.Id, + Content: string(content), + }); err != nil { + l.Errorw("failed to insert login log", + logger.Field("user_id", userInfo.Id), + logger.Field("ip", req.IP), + logger.Field("error", err.Error()), + ) + } + } + }() + + cacheKey := fmt.Sprintf("%s:%s:%s", config.AuthCodeCacheKey, constant.Security, req.Email) + // Check the verification code + if value, err := l.svcCtx.Redis.Get(l.ctx, cacheKey).Result(); err != nil { + l.Errorw("Verification code error", logger.Field("cacheKey", cacheKey), logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "Verification code error") + } else { + var payload CacheKeyPayload + if err := json.Unmarshal([]byte(value), &payload); err != nil { + l.Errorw("Unmarshal errors", logger.Field("cacheKey", cacheKey), logger.Field("error", err.Error()), logger.Field("value", value)) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "Verification code error") + } + if payload.Code != req.Code { + l.Errorw("Verification code error", logger.Field("cacheKey", cacheKey), logger.Field("error", "Verification code error"), logger.Field("reqCode", req.Code), logger.Field("payloadCode", payload.Code)) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "Verification code error") + } + } + + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + + // Check user + authMethod, err := l.svcCtx.UserModel.FindUserAuthMethodByOpenID(l.ctx, "email", req.Email) + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.UserNotExist), "user email not exist: %v", req.Email) + } + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find user by email error: %v", err.Error()) + } + + userInfo, err = l.svcCtx.UserModel.FindOne(l.ctx, authMethod.UserId) + if err != nil { + if errors.Is(err, 
gorm.ErrRecordNotFound) { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.UserNotExist), "user email not exist: %v", req.Email) + } + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query user info failed: %v", err.Error()) + } + + // Check if user is admin + if userInfo.IsAdmin == nil || !*userInfo.IsAdmin { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.PermissionDenied), "user is not admin") + } + + // Update password + userInfo.Password = tool.EncodePassWord(req.Password) + userInfo.Algo = "default" + if err = l.svcCtx.UserModel.Update(l.ctx, userInfo); err != nil { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "update user info failed: %v", err.Error()) + } + + // Bind device to user if identifier is provided + if req.Identifier != "" { + bindLogic := auth.NewBindDeviceLogic(l.ctx, l.svcCtx) + if err := bindLogic.BindDeviceToUser(req.Identifier, req.IP, req.UserAgent, userInfo.Id); err != nil { + l.Errorw("failed to bind device to user", + logger.Field("user_id", userInfo.Id), + logger.Field("identifier", req.Identifier), + logger.Field("error", err.Error()), + ) + // Don't fail register if device binding fails, just log the error + } + } + if l.ctx.Value(constant.CtxLoginType) != nil { + req.LoginType = l.ctx.Value(constant.CtxLoginType).(string) + } + // Generate session id + sessionId := uuidx.NewUUID().String() + // Generate token + token, err := jwt.NewJwtToken( + l.svcCtx.Config.JwtAuth.AccessSecret, + time.Now().Unix(), + l.svcCtx.Config.JwtAuth.AccessExpire, + jwt.WithOption("UserId", userInfo.Id), + jwt.WithOption("SessionId", sessionId), + jwt.WithOption("identifier", req.Identifier), + jwt.WithOption("CtxLoginType", req.LoginType), + ) + if err != nil { + l.Logger.Error("[AdminResetPassword] token generate error", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "token generate error: %v", err.Error()) + } + sessionIdCacheKey := fmt.Sprintf("%v:%v", 
config.SessionIdKey, sessionId) + if err = l.svcCtx.Redis.Set(l.ctx, sessionIdCacheKey, userInfo.Id, time.Duration(l.svcCtx.Config.JwtAuth.AccessExpire)*time.Second).Err(); err != nil { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "set session id error: %v", err.Error()) + } + loginStatus = true + return &types.LoginResponse{ + Token: token, + }, nil +} + +func (l *AdminResetPasswordLogic) verifyCaptcha(req *types.ResetPasswordRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[AdminResetPasswordLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableAdminLoginCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: req.IP, + }) +} diff --git a/internal/logic/auth/admin/adminSliderVerifyCaptchaLogic.go b/internal/logic/auth/admin/adminSliderVerifyCaptchaLogic.go new file mode 100644 index 0000000..d6203ba --- /dev/null +++ b/internal/logic/auth/admin/adminSliderVerifyCaptchaLogic.go @@ -0,0 +1,57 @@ +package admin + +import ( + "context" + + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" + "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/tool" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" +) + +type AdminSliderVerifyCaptchaLogic struct { + logger.Logger 
+ ctx context.Context + svcCtx *svc.ServiceContext +} + +// Verify slider captcha +func NewAdminSliderVerifyCaptchaLogic(ctx context.Context, svcCtx *svc.ServiceContext) *AdminSliderVerifyCaptchaLogic { + return &AdminSliderVerifyCaptchaLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *AdminSliderVerifyCaptchaLogic) AdminSliderVerifyCaptcha(req *types.SliderVerifyCaptchaRequest) (resp *types.SliderVerifyCaptchaResponse, err error) { + // Get verify config from database + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[AdminSliderVerifyCaptchaLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var config struct { + CaptchaType string `json:"captcha_type"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &config) + + if config.CaptchaType != "slider" { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "slider captcha not enabled") + } + + sliderSvc := captcha.NewSliderService(l.svcCtx.Redis) + token, err := sliderSvc.VerifySlider(l.ctx, req.Id, req.X, req.Y, req.Trail) + if err != nil { + l.Logger.Error("[AdminSliderVerifyCaptchaLogic] VerifySlider error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "verify slider error") + } + + return &types.SliderVerifyCaptchaResponse{ + Token: token, + }, nil +} diff --git a/internal/logic/auth/generateCaptchaLogic.go b/internal/logic/auth/generateCaptchaLogic.go new file mode 100644 index 0000000..615c4bc --- /dev/null +++ b/internal/logic/auth/generateCaptchaLogic.go @@ -0,0 +1,81 @@ +package auth + +import ( + "context" + + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" + "github.com/perfect-panel/server/pkg/logger" + 
"github.com/perfect-panel/server/pkg/tool" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" +) + +type GenerateCaptchaLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Generate captcha +func NewGenerateCaptchaLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GenerateCaptchaLogic { + return &GenerateCaptchaLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GenerateCaptchaLogic) GenerateCaptcha() (resp *types.GenerateCaptchaResponse, err error) { + resp = &types.GenerateCaptchaResponse{} + + // Get verify config from database + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[GenerateCaptchaLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var config struct { + CaptchaType string `json:"captcha_type"` + TurnstileSiteKey string `json:"turnstile_site_key"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &config) + + resp.Type = config.CaptchaType + + // If captcha type is local, generate captcha image + if config.CaptchaType == "local" { + captchaService := captcha.NewService(captcha.Config{ + Type: captcha.CaptchaTypeLocal, + RedisClient: l.svcCtx.Redis, + }) + + id, image, err := captchaService.Generate(l.ctx) + if err != nil { + l.Logger.Error("[GenerateCaptchaLogic] Generate captcha error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "Generate captcha error: %v", err.Error()) + } + + resp.Id = id + resp.Image = image + } else if config.CaptchaType == "turnstile" { + // For Turnstile, just return the site key + resp.Id = config.TurnstileSiteKey + } else if config.CaptchaType == "slider" { + // For slider, generate background and block images + sliderSvc := 
captcha.NewSliderService(l.svcCtx.Redis) + id, bgImage, blockImage, err := sliderSvc.GenerateSlider(l.ctx) + if err != nil { + l.Logger.Error("[GenerateCaptchaLogic] Generate slider captcha error: ", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "Generate slider captcha error: %v", err.Error()) + } + resp.Id = id + resp.Image = bgImage + resp.BlockImage = blockImage + } + + return resp, nil +} diff --git a/internal/logic/auth/registerLimitLogic.go b/internal/logic/auth/registerLimitLogic.go index 048ef75..11ab3ca 100644 --- a/internal/logic/auth/registerLimitLogic.go +++ b/internal/logic/auth/registerLimitLogic.go @@ -16,6 +16,10 @@ func registerIpLimit(svcCtx *svc.ServiceContext, ctx context.Context, registerIp return true } + // Add timeout protection for Redis operations + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + // Use a sorted set to track IP registrations with timestamp as score // Key format: register:ip:{ip} key := fmt.Sprintf("%s%s", config.RegisterIpKeyPrefix, registerIp) diff --git a/internal/logic/auth/resetPasswordLogic.go b/internal/logic/auth/resetPasswordLogic.go index f504437..7484115 100644 --- a/internal/logic/auth/resetPasswordLogic.go +++ b/internal/logic/auth/resetPasswordLogic.go @@ -9,6 +9,7 @@ import ( "github.com/perfect-panel/server/internal/model/log" "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/pkg/captcha" "github.com/perfect-panel/server/pkg/jwt" "github.com/perfect-panel/server/pkg/uuidx" @@ -91,6 +92,11 @@ func (l *ResetPasswordLogic) ResetPassword(req *types.ResetPasswordRequest) (res l.svcCtx.Redis.Del(l.ctx, cacheKey) } + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + // Check user authMethod, err := l.svcCtx.UserModel.FindUserAuthMethodByOpenID(l.ctx, "email", req.Email) if err != nil { @@ -155,3 +161,30 @@ func (l *ResetPasswordLogic) ResetPassword(req 
*types.ResetPasswordRequest) (res Token: token, }, nil } + +func (l *ResetPasswordLogic) verifyCaptcha(req *types.ResetPasswordRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[ResetPasswordLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableUserResetPasswordCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: req.IP, + }) +} diff --git a/internal/logic/auth/sliderVerifyCaptchaLogic.go b/internal/logic/auth/sliderVerifyCaptchaLogic.go new file mode 100644 index 0000000..9659f97 --- /dev/null +++ b/internal/logic/auth/sliderVerifyCaptchaLogic.go @@ -0,0 +1,51 @@ +package auth + +import ( + "context" + + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" + "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/tool" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" +) + +type SliderVerifyCaptchaLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Verify slider captcha +func NewSliderVerifyCaptchaLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SliderVerifyCaptchaLogic { + return &SliderVerifyCaptchaLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l 
*SliderVerifyCaptchaLogic) SliderVerifyCaptcha(req *types.SliderVerifyCaptchaRequest) (resp *types.SliderVerifyCaptchaResponse, err error) { + var config struct { + CaptchaType string `json:"captcha_type"` + } + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &config) + + if config.CaptchaType != string(captcha.CaptchaTypeSlider) { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "slider captcha not enabled") + } + + sliderSvc := captcha.NewSliderService(l.svcCtx.Redis) + token, err := sliderSvc.VerifySlider(l.ctx, req.Id, req.X, req.Y, req.Trail) + if err != nil { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "slider verify failed: %v", err.Error()) + } + + return &types.SliderVerifyCaptchaResponse{Token: token}, nil +} diff --git a/internal/logic/auth/telephoneLoginLogic.go b/internal/logic/auth/telephoneLoginLogic.go index 3a8655c..bb538e8 100644 --- a/internal/logic/auth/telephoneLoginLogic.go +++ b/internal/logic/auth/telephoneLoginLogic.go @@ -12,6 +12,7 @@ import ( "github.com/perfect-panel/server/internal/model/log" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" "github.com/perfect-panel/server/pkg/constant" "github.com/perfect-panel/server/pkg/jwt" "github.com/perfect-panel/server/pkg/logger" @@ -94,6 +95,11 @@ func (l *TelephoneLoginLogic) TelephoneLogin(req *types.TelephoneLoginRequest, r return nil, xerr.NewErrCodeMsg(xerr.InvalidParams, "password and telephone code is empty") } + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + if req.TelephoneCode == "" { // Verify password if !tool.MultiPasswordVerify(userInfo.Algo, userInfo.Salt, req.Password, userInfo.Password) { @@ -164,3 +170,30 @@ func (l 
*TelephoneLoginLogic) TelephoneLogin(req *types.TelephoneLoginRequest, r Token: token, }, nil } + +func (l *TelephoneLoginLogic) verifyCaptcha(req *types.TelephoneLoginRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[TelephoneLoginLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableUserLoginCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: req.IP, + }) +} diff --git a/internal/logic/auth/telephoneRegisterLogic.go b/internal/logic/auth/telephoneRegisterLogic.go new file mode 100644 index 0000000..118a6a8 --- /dev/null +++ b/internal/logic/auth/telephoneRegisterLogic.go @@ -0,0 +1,30 @@ +package auth + +import ( + "context" + + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" +) + +type TelephoneRegisterLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// User Telephone register +func NewTelephoneRegisterLogic(ctx context.Context, svcCtx *svc.ServiceContext) *TelephoneRegisterLogic { + return &TelephoneRegisterLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *TelephoneRegisterLogic) TelephoneRegister(req *types.TelephoneRegisterRequest) (resp *types.LoginResponse, err error) { + // todo: add your logic here and delete this line + + return 
+} diff --git a/internal/logic/auth/telephoneUserRegisterLogic.go b/internal/logic/auth/telephoneUserRegisterLogic.go index e503190..b40aca6 100644 --- a/internal/logic/auth/telephoneUserRegisterLogic.go +++ b/internal/logic/auth/telephoneUserRegisterLogic.go @@ -13,6 +13,7 @@ import ( "github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" "github.com/perfect-panel/server/pkg/jwt" "github.com/perfect-panel/server/pkg/logger" "github.com/perfect-panel/server/pkg/phone" @@ -81,6 +82,12 @@ func (l *TelephoneUserRegisterLogic) TelephoneUserRegister(req *types.TelephoneR return nil, errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "code error") } l.svcCtx.Redis.Del(l.ctx, cacheKey) + + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + // Check if the user exists _, err = l.svcCtx.UserModel.FindUserAuthMethodByOpenID(l.ctx, "mobile", phoneNumber) if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { @@ -280,3 +287,29 @@ func (l *TelephoneUserRegisterLogic) activeTrial(uid int64) (*user.Subscribe, er return userSub, nil } +func (l *TelephoneUserRegisterLogic) verifyCaptcha(req *types.TelephoneRegisterRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[TelephoneUserRegisterLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableUserRegisterCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, 
cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: req.IP, + }) +} diff --git a/internal/logic/auth/userLoginLogic.go b/internal/logic/auth/userLoginLogic.go index 4204c53..1faa112 100644 --- a/internal/logic/auth/userLoginLogic.go +++ b/internal/logic/auth/userLoginLogic.go @@ -6,6 +6,7 @@ import ( "time" "github.com/perfect-panel/server/internal/model/log" + "github.com/perfect-panel/server/pkg/captcha" "github.com/perfect-panel/server/pkg/constant" "github.com/perfect-panel/server/pkg/logger" @@ -66,6 +67,11 @@ func (l *UserLoginLogic) UserLogin(req *types.UserLoginRequest) (resp *types.Log } }(l.svcCtx) + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + userInfo, err = l.svcCtx.UserModel.FindOneByEmail(l.ctx, req.Email) if err != nil { if errors.Is(err, gorm.ErrRecordNotFound) { @@ -134,3 +140,30 @@ func (l *UserLoginLogic) UserLogin(req *types.UserLoginRequest) (resp *types.Log Token: token, }, nil } + +func (l *UserLoginLogic) verifyCaptcha(req *types.UserLoginRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[UserLoginLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableUserLoginCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: req.IP, + }) +} diff --git 
a/internal/logic/auth/userRegisterLogic.go b/internal/logic/auth/userRegisterLogic.go index 70def38..5aa7ce7 100644 --- a/internal/logic/auth/userRegisterLogic.go +++ b/internal/logic/auth/userRegisterLogic.go @@ -8,11 +8,13 @@ import ( "time" "github.com/perfect-panel/server/internal/config" + "github.com/perfect-panel/server/internal/logic/admin/group" "github.com/perfect-panel/server/internal/logic/common" "github.com/perfect-panel/server/internal/model/log" "github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/captcha" "github.com/perfect-panel/server/pkg/constant" "github.com/perfect-panel/server/pkg/jwt" "github.com/perfect-panel/server/pkg/logger" @@ -85,6 +87,12 @@ func (l *UserRegisterLogic) UserRegister(req *types.UserRegisterRequest) (resp * } l.svcCtx.Redis.Del(l.ctx, cacheKey) } + + // Verify captcha + if err := l.verifyCaptcha(req); err != nil { + return nil, err + } + // Check if the user exists u, err := l.svcCtx.UserModel.FindOneByEmail(l.ctx, req.Email) if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { @@ -132,22 +140,76 @@ func (l *UserRegisterLogic) UserRegister(req *types.UserRegisterRequest) (resp * return err } - if l.svcCtx.Config.Register.EnableTrial { - // Active trial - var trialErr error - trialSubscribe, trialErr = l.activeTrial(userInfo.Id) - if trialErr != nil { - return trialErr - } - } return nil }) if err != nil { return nil, err } + // Activate trial subscription after transaction success (moved outside transaction to reduce lock time) + if l.svcCtx.Config.Register.EnableTrial { + trialSubscribe, err = l.activeTrial(userInfo.Id) + if err != nil { + l.Errorw("Failed to activate trial subscription", logger.Field("error", err.Error())) + // Don't fail registration if trial activation fails + } + } + // Clear cache after transaction success if l.svcCtx.Config.Register.EnableTrial && 
trialSubscribe != nil { + // Trigger user group recalculation (runs in background) + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Check if group management is enabled + var groupEnabled string + err := l.svcCtx.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). + Select("value"). + Scan(&groupEnabled).Error + if err != nil || groupEnabled != "true" && groupEnabled != "1" { + l.Debugf("Group management not enabled, skipping recalculation") + return + } + + // Get the configured grouping mode + var groupMode string + err = l.svcCtx.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "mode"). + Select("value"). + Scan(&groupMode).Error + if err != nil { + l.Errorw("Failed to get group mode", logger.Field("error", err.Error())) + return + } + + // Validate group mode + if groupMode != "average" && groupMode != "subscribe" && groupMode != "traffic" { + l.Debugf("Invalid group mode (current: %s), skipping", groupMode) + return + } + + // Trigger group recalculation with the configured mode + logic := group.NewRecalculateGroupLogic(ctx, l.svcCtx) + req := &types.RecalculateGroupRequest{ + Mode: groupMode, + } + + if err := logic.RecalculateGroup(req); err != nil { + l.Errorw("Failed to recalculate user group", + logger.Field("user_id", userInfo.Id), + logger.Field("error", err.Error()), + ) + return + } + + l.Infow("Successfully recalculated user group after registration", + logger.Field("user_id", userInfo.Id), + logger.Field("mode", groupMode), + ) + }() + // Clear user subscription cache if err = l.svcCtx.UserModel.ClearSubscribeCache(l.ctx, trialSubscribe); err != nil { l.Errorw("ClearSubscribeCache failed", logger.Field("error", err.Error()), logger.Field("userSubscribeId", trialSubscribe.Id)) @@ -202,7 +264,7 @@ func (l *UserRegisterLogic) UserRegister(req *types.UserRegisterRequest) (resp * } loginStatus := true defer func() { - if token != "" && 
userInfo.Id != 0 { + if token != "" && userInfo != nil && userInfo.Id != 0 { loginLog := log.Login{ Method: "email", LoginIP: req.IP, @@ -275,3 +337,30 @@ func (l *UserRegisterLogic) activeTrial(uid int64) (*user.Subscribe, error) { } return userSub, nil } + +func (l *UserRegisterLogic) verifyCaptcha(req *types.UserRegisterRequest) error { + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[UserRegisterLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } + + var cfg struct { + CaptchaType string `json:"captcha_type"` + EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` + TurnstileSecret string `json:"turnstile_secret"` + } + tool.SystemConfigSliceReflectToStruct(verifyCfg, &cfg) + + if !cfg.EnableUserRegisterCaptcha { + return nil + } + + return captcha.VerifyCaptcha(l.ctx, l.svcCtx.Redis, cfg.CaptchaType, cfg.TurnstileSecret, captcha.VerifyInput{ + CaptchaId: req.CaptchaId, + CaptchaCode: req.CaptchaCode, + CfToken: req.CfToken, + SliderToken: req.SliderToken, + IP: req.IP, + }) +} diff --git a/internal/logic/common/getGlobalConfigLogic.go b/internal/logic/common/getGlobalConfigLogic.go index 502e098..f470b46 100644 --- a/internal/logic/common/getGlobalConfigLogic.go +++ b/internal/logic/common/getGlobalConfigLogic.go @@ -41,6 +41,11 @@ func (l *GetGlobalConfigLogic) GetGlobalConfig() (resp *types.GetGlobalConfigRes l.Logger.Error("[GetGlobalConfigLogic] GetVerifyCodeConfig error: ", logger.Field("error", err.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyCodeConfig error: %v", err.Error()) } + verifyCfg, err := l.svcCtx.SystemModel.GetVerifyConfig(l.ctx) + if err != nil { + l.Logger.Error("[GetGlobalConfigLogic] GetVerifyConfig error: ", logger.Field("error", err.Error())) + return nil, 
errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "GetVerifyConfig error: %v", err.Error()) + } tool.DeepCopy(&resp.Site, l.svcCtx.Config.Site) tool.DeepCopy(&resp.Subscribe, l.svcCtx.Config.Subscribe) @@ -52,17 +57,12 @@ func (l *GetGlobalConfigLogic) GetGlobalConfig() (resp *types.GetGlobalConfigRes tool.DeepCopy(&resp.Invite, l.svcCtx.Config.Invite) tool.SystemConfigSliceReflectToStruct(currencyCfg, &resp.Currency) tool.SystemConfigSliceReflectToStruct(verifyCodeCfg, &resp.VerifyCode) + tool.SystemConfigSliceReflectToStruct(verifyCfg, &resp.Verify) if report.IsGatewayMode() { resp.Subscribe.SubscribePath = "/sub" + l.svcCtx.Config.Subscribe.SubscribePath } - resp.Verify = types.VeifyConfig{ - TurnstileSiteKey: l.svcCtx.Config.Verify.TurnstileSiteKey, - EnableLoginVerify: l.svcCtx.Config.Verify.LoginVerify, - EnableRegisterVerify: l.svcCtx.Config.Verify.RegisterVerify, - EnableResetPasswordVerify: l.svcCtx.Config.Verify.ResetPasswordVerify, - } var methods []string // auth methods diff --git a/internal/logic/common/inviteLinkResolver.go b/internal/logic/common/inviteLinkResolver.go index fa6ab9a..299e60a 100644 --- a/internal/logic/common/inviteLinkResolver.go +++ b/internal/logic/common/inviteLinkResolver.go @@ -215,18 +215,26 @@ func (r *InviteLinkResolver) generateShortLinkWithTimeout(referCode string, time } _, domain := r.resolveShareURLAndDomain() - requestCtx := r.ctx - var cancel context.CancelFunc - if timeout > 0 { - requestCtx, cancel = context.WithTimeout(r.ctx, timeout) - defer cancel() + const maxRetries = 3 + var lastErr error + for attempt := 0; attempt < maxRetries; attempt++ { + requestCtx := r.ctx + var cancel context.CancelFunc + if timeout > 0 { + requestCtx, cancel = context.WithTimeout(r.ctx, timeout) + } + + shortLink, err := r.createShortLink(requestCtx, longLink, domain) + if cancel != nil { + cancel() + } + if err == nil && strings.TrimSpace(shortLink) != "" { + return strings.TrimSpace(shortLink), nil + } + lastErr = err } - 
shortLink, err := r.createShortLink(requestCtx, longLink, domain) - if err != nil { - return "", err - } - return strings.TrimSpace(shortLink), nil + return "", lastErr } func (r *InviteLinkResolver) getCachedShortLink(referCode string) string { diff --git a/internal/logic/common/newUserEligibility.go b/internal/logic/common/newUserEligibility.go new file mode 100644 index 0000000..bb371d4 --- /dev/null +++ b/internal/logic/common/newUserEligibility.go @@ -0,0 +1,188 @@ +package common + +import ( + "context" + "sort" + "time" + + modelOrder "github.com/perfect-panel/server/internal/model/order" + modelUser "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" + "gorm.io/gorm" +) + +const ( + NewUserEligibilitySourceDeviceCreatedAt = "device_created_at" + NewUserEligibilitySourceUserCreatedAtFallback = "user_created_at_fallback" + NewUserEligibilityJoinSourceBindEmail = "bind_email_with_verification" + NewUserEligibilityWindow = 24 * time.Hour +) + +type NewUserEligibilityContext struct { + ScopeUserIDs []int64 + EligibilityStartAt time.Time + Source string +} + +func (c *NewUserEligibilityContext) IsNewUserAt(now time.Time) bool { + if c == nil || c.EligibilityStartAt.IsZero() { + return false + } + return now.Sub(c.EligibilityStartAt) <= NewUserEligibilityWindow +} + +type newUserEligibilityRelation struct { + FamilyID int64 `gorm:"column:family_id"` + Role uint8 `gorm:"column:role"` + JoinSource string `gorm:"column:join_source"` + OwnerUserID int64 `gorm:"column:owner_user_id"` +} + +func ResolveNewUserEligibility(ctx context.Context, db *gorm.DB, currentUserID int64) (*NewUserEligibilityContext, error) { + if currentUserID <= 0 { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.InvalidParams), "current user id is empty") + } + + scopeUserIDs, err := resolveNewUserEligibilityScope(ctx, db, currentUserID) + if err != nil { + return nil, err + } + + startAt, source, err := 
resolveNewUserEligibilityStartAt(ctx, db, scopeUserIDs) + if err != nil { + return nil, err + } + + return &NewUserEligibilityContext{ + ScopeUserIDs: scopeUserIDs, + EligibilityStartAt: startAt, + Source: source, + }, nil +} + +func CountScopedSubscribePurchaseOrders( + ctx context.Context, + db *gorm.DB, + scopeUserIDs []int64, + subscribeID int64, + statuses []int64, + excludeOrderNo string, +) (int64, error) { + if len(scopeUserIDs) == 0 { + return 0, nil + } + + var count int64 + query := db.WithContext(ctx). + Model(&modelOrder.Order{}). + Where("user_id IN ? AND subscribe_id = ? AND type = 1", scopeUserIDs, subscribeID) + if len(statuses) > 0 { + query = query.Where("status IN ?", statuses) + } + if excludeOrderNo != "" { + query = query.Where("order_no != ?", excludeOrderNo) + } + if err := query.Count(&count).Error; err != nil { + return 0, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "count scoped subscribe purchase orders failed") + } + + return count, nil +} + +func resolveNewUserEligibilityScope(ctx context.Context, db *gorm.DB, currentUserID int64) ([]int64, error) { + defaultScope := []int64{currentUserID} + + var relation newUserEligibilityRelation + err := db.WithContext(ctx). + Table("user_family_member AS ufm"). + Select("ufm.family_id, ufm.role, ufm.join_source, uf.owner_user_id"). + Joins("JOIN user_family AS uf ON uf.id = ufm.family_id AND uf.deleted_at IS NULL AND uf.status = ?", modelUser.FamilyStatusActive). + Where("ufm.user_id = ? AND ufm.deleted_at IS NULL AND ufm.status = ?", currentUserID, modelUser.FamilyMemberActive). + Limit(1). 
+ Take(&relation).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return defaultScope, nil + } + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query new user eligibility family relation failed") + } + + if relation.Role != modelUser.FamilyRoleOwner && relation.JoinSource != NewUserEligibilityJoinSourceBindEmail { + return defaultScope, nil + } + + var scopedUserIDs []int64 + if err = db.WithContext(ctx). + Table("user_family_member AS ufm"). + Select("ufm.user_id"). + Joins("JOIN user_family AS uf ON uf.id = ufm.family_id AND uf.deleted_at IS NULL AND uf.status = ?", modelUser.FamilyStatusActive). + Where( + "ufm.family_id = ? AND ufm.deleted_at IS NULL AND ufm.status = ? AND (ufm.role = ? OR ufm.join_source = ?)", + relation.FamilyID, + modelUser.FamilyMemberActive, + modelUser.FamilyRoleOwner, + NewUserEligibilityJoinSourceBindEmail, + ). + Pluck("ufm.user_id", &scopedUserIDs).Error; err != nil { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query new user eligibility scope failed") + } + + scopedUserIDs = append(scopedUserIDs, currentUserID) + return uniqueSortedInt64(scopedUserIDs), nil +} + +func resolveNewUserEligibilityStartAt(ctx context.Context, db *gorm.DB, scopeUserIDs []int64) (time.Time, string, error) { + var earliestDevice modelUser.Device + err := db.WithContext(ctx). + Model(&modelUser.Device{}). + Where("user_id IN ?", scopeUserIDs). + Order("created_at ASC"). + Limit(1). + Take(&earliestDevice).Error + switch { + case err == nil && !earliestDevice.CreatedAt.IsZero(): + return earliestDevice.CreatedAt, NewUserEligibilitySourceDeviceCreatedAt, nil + case err != nil && !errors.Is(err, gorm.ErrRecordNotFound): + return time.Time{}, "", errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query new user eligibility device start failed") + } + + var earliestUser modelUser.User + err = db.WithContext(ctx). + Model(&modelUser.User{}). + Where("id IN ?", scopeUserIDs). 
+ Order("created_at ASC"). + Limit(1). + Take(&earliestUser).Error + if err != nil { + return time.Time{}, "", errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "query new user eligibility fallback start failed") + } + if earliestUser.CreatedAt.IsZero() { + return time.Time{}, "", errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "new user eligibility start time not found") + } + + return earliestUser.CreatedAt, NewUserEligibilitySourceUserCreatedAtFallback, nil +} + +func uniqueSortedInt64(values []int64) []int64 { + if len(values) == 0 { + return nil + } + + seen := make(map[int64]struct{}, len(values)) + result := make([]int64, 0, len(values)) + for _, value := range values { + if value == 0 { + continue + } + if _, ok := seen[value]; ok { + continue + } + seen[value] = struct{}{} + result = append(result, value) + } + sort.Slice(result, func(i, j int) bool { + return result[i] < result[j] + }) + return result +} diff --git a/internal/logic/public/iap/apple/attachTransactionLogic.go b/internal/logic/public/iap/apple/attachTransactionLogic.go index bfecea6..797766a 100644 --- a/internal/logic/public/iap/apple/attachTransactionLogic.go +++ b/internal/logic/public/iap/apple/attachTransactionLogic.go @@ -6,12 +6,9 @@ import ( "encoding/hex" "encoding/json" "fmt" - "os" - "strconv" "strings" "time" - tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api/v5" "github.com/google/uuid" "github.com/hibiken/asynq" commonLogic "github.com/perfect-panel/server/internal/logic/common" @@ -100,12 +97,10 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest existingOrderNo, validateErr := l.validateOrderTradeNoBinding(orderInfo, tradeNoCandidates) if validateErr != nil { l.Errorw("Apple 交易绑定校验失败", logger.Field("orderNo", req.OrderNo), logger.Field("tradeNoCandidates", tradeNoCandidates), logger.Field("error", validateErr.Error())) - l.sendIAPAttachTraceToTelegram("REJECT_BINDING_ERROR", orderInfo, u.Id, orderInfo.SubscribeId, "", 
orderInfo.Quantity, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, validateErr.Error()) return nil, errors.Wrapf(xerr.NewErrCode(xerr.ERROR), "apple transaction binding error") } if existingOrderNo != "" { l.Errorw("Apple 交易重复绑定,返回已绑定订单", logger.Field("orderNo", req.OrderNo), logger.Field("existingOrderNo", existingOrderNo), logger.Field("tradeNoCandidates", tradeNoCandidates)) - l.sendIAPAttachTraceToTelegram("REJECT_DUPLICATE_TRANSACTION", orderInfo, u.Id, orderInfo.SubscribeId, "", orderInfo.Quantity, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, "already used by "+existingOrderNo) // 关闭当前 pending 订单,避免产生孤儿订单 if orderInfo.Status == orderStatusPending { if closeErr := l.svcCtx.DB.Model(&ordermodel.Order{}). @@ -282,7 +277,6 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest l.Errorw("同订单幂等同步失败", logger.Field("orderNo", req.OrderNo), logger.Field("error", syncErr.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "sync order status failed: %v", syncErr.Error()) } - l.sendIAPAttachTraceToTelegram("IDEMPOTENT_SAME_ORDER", orderInfo, u.Id, subscribeId, tier, duration, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, "") l.Infow("事务已处理,同订单幂等返回", logger.Field("orderNo", req.OrderNo), logger.Field("expiresAt", expiresAt)) return &types.AttachAppleTransactionResponse{ExpiresAt: expiresAt, Tier: tier}, nil } @@ -296,7 +290,6 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest l.Errorw("事务已处理但同步订单状态失败", logger.Field("orderNo", req.OrderNo), logger.Field("error", syncErr.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "sync order status failed: %v", syncErr.Error()) } - l.sendIAPAttachTraceToTelegram("SUCCESS_NEW_PURCHASE_QUEUE", orderInfo, u.Id, subscribeId, tier, duration, txPayload.PurchaseDate, txPayload.TransactionId, 
txPayload.OriginalTransactionId, "") l.Infow("事务已处理,首购订单等待激活队列发放订阅", logger.Field("orderNo", req.OrderNo), logger.Field("expiresAt", exp.Unix())) return &types.AttachAppleTransactionResponse{ExpiresAt: exp.Unix(), Tier: tier}, nil } @@ -316,7 +309,6 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest l.Errorw("同步订单状态失败(existSub)", logger.Field("orderNo", req.OrderNo), logger.Field("error", syncErr.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "sync order status failed: %v", syncErr.Error()) } - l.sendIAPAttachTraceToTelegram("SUCCESS_RENEW_EXIST_SUB", orderInfo, u.Id, subscribeId, tier, duration, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, "") l.Infow("事务已处理,刷新订阅到期时间", logger.Field("originalTransactionId", txPayload.OriginalTransactionId), logger.Field("tier", tier), logger.Field("expiresAt", newExpire.Unix())) return &types.AttachAppleTransactionResponse{ ExpiresAt: newExpire.Unix(), @@ -337,7 +329,6 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest l.Errorw("同步订单状态失败(orderLinkedSub)", logger.Field("orderNo", req.OrderNo), logger.Field("error", syncErr.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "sync order status failed: %v", syncErr.Error()) } - l.sendIAPAttachTraceToTelegram("SUCCESS_RENEW_ORDER_LINKED_SUB", orderInfo, u.Id, subscribeId, tier, duration, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, "") l.Infow("事务已处理,刷新订单关联订阅到期时间", logger.Field("orderNo", req.OrderNo), logger.Field("userSubscribeId", orderLinkedSub.Id), logger.Field("expiresAt", newExpire.Unix())) return &types.AttachAppleTransactionResponse{ExpiresAt: newExpire.Unix(), Tier: tier}, nil } @@ -355,7 +346,6 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest l.Errorw("同步订单状态失败(singleModeAnchorSub)", logger.Field("orderNo", req.OrderNo), logger.Field("error", 
syncErr.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "sync order status failed: %v", syncErr.Error()) } - l.sendIAPAttachTraceToTelegram("SUCCESS_RENEW_SINGLE_MODE_ANCHOR", orderInfo, u.Id, subscribeId, tier, duration, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, "") l.Infow("事务已处理,刷新单订阅锚点到期时间", logger.Field("userSubscribeId", singleModeAnchorSub.Id), logger.Field("expiresAt", newExpire.Unix())) return &types.AttachAppleTransactionResponse{ExpiresAt: newExpire.Unix(), Tier: tier}, nil } @@ -394,6 +384,7 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest userSub := user.Subscribe{ UserId: entitlement.EffectiveUserID, SubscribeId: subscribeId, + OrderId: orderInfo.Id, StartTime: time.Now(), ExpireTime: exp, Traffic: 0, @@ -426,7 +417,6 @@ func (l *AttachTransactionLogic) Attach(req *types.AttachAppleTransactionRequest l.Errorw("绑定事务提交失败", logger.Field("error", err.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseInsertError), "insert error: %v", err.Error()) } - l.sendIAPAttachTraceToTelegram("SUCCESS_COMMIT", orderInfo, u.Id, subscribeId, tier, duration, txPayload.PurchaseDate, txPayload.TransactionId, txPayload.OriginalTransactionId, "") // 事务提交后立即清除订阅缓存,避免 App 查到旧数据(激活队列异步执行,存在竞态) if orderLinkedSub != nil { @@ -600,94 +590,3 @@ func containsString(candidates []string, target string) bool { } return false } - -func (l *AttachTransactionLogic) sendIAPAttachTraceToTelegram(status string, orderInfo *ordermodel.Order, userID int64, subscribeID int64, subscribeName string, quantity int64, purchaseAt time.Time, transactionID string, originalTransactionID string, note string) { - if l.svcCtx == nil { - return - } - orderNo := "" - if orderInfo != nil { - orderNo = orderInfo.OrderNo - } - if subscribeName == "" { - subscribeName = "-" - } - message := fmt.Sprintf( - "IAP Attach Log [%s]\n订单号: %s\n购买时间: %s\n购买人ID: %d\n订阅信息: %s (subscribe_id=%d, 
quantity=%d)\ntransaction: %s\noriginal_transaction: %s", - status, - orderNo, - purchaseAt.Format("2006-01-02 15:04:05"), - userID, - subscribeName, - subscribeID, - quantity, - transactionID, - originalTransactionID, - ) - if note != "" { - message += "\n备注: " + note - } - - overrideBotToken := strings.TrimSpace(os.Getenv("TG_BOT_TOKEN")) - overrideChatID := strings.TrimSpace(os.Getenv("TG_CHAT_ID")) - if overrideBotToken != "" && overrideChatID != "" { - if chatID, err := strconv.ParseInt(overrideChatID, 10, 64); err == nil && chatID != 0 { - bot := l.svcCtx.TelegramBot - if bot == nil || strings.TrimSpace(l.svcCtx.Config.Telegram.BotToken) != overrideBotToken { - overrideBot, newErr := tgbotapi.NewBotAPI(overrideBotToken) - if newErr == nil { - bot = overrideBot - } else { - l.Errorw("初始化 TG 覆盖 Bot 失败", logger.Field("error", newErr.Error())) - } - } - if bot != nil { - msg := tgbotapi.NewMessage(chatID, message) - if _, sendErr := bot.Send(msg); sendErr != nil { - l.Errorw("发送 IAP TG 覆盖通道消息失败", logger.Field("error", sendErr.Error())) - } - return - } - } - } - - if l.svcCtx.TelegramBot == nil || !l.svcCtx.Config.Telegram.EnableNotify { - return - } - - if groupChatID, err := strconv.ParseInt(strings.TrimSpace(l.svcCtx.Config.Telegram.GroupChatID), 10, 64); err == nil && groupChatID != 0 { - msg := tgbotapi.NewMessage(groupChatID, message) - if _, sendErr := l.svcCtx.TelegramBot.Send(msg); sendErr != nil { - l.Errorw("发送 IAP TG 群消息失败", logger.Field("error", sendErr.Error())) - } - return - } - - admins, err := l.svcCtx.UserModel.QueryAdminUsers(l.ctx) - if err != nil { - l.Errorw("查询管理员失败(IAP TG日志)", logger.Field("error", err.Error())) - return - } - for _, admin := range admins { - if telegramID, ok := findTelegramAuth(admin); ok { - msg := tgbotapi.NewMessage(telegramID, message) - if _, sendErr := l.svcCtx.TelegramBot.Send(msg); sendErr != nil { - l.Errorw("发送 IAP TG 管理员消息失败", logger.Field("error", sendErr.Error())) - } - } - } -} - -func findTelegramAuth(u 
*user.User) (int64, bool) { - if u == nil { - return 0, false - } - for _, item := range u.AuthMethods { - if item.AuthType == "telegram" { - if telegramID, err := strconv.ParseInt(item.AuthIdentifier, 10, 64); err == nil { - return telegramID, true - } - } - } - return 0, false -} diff --git a/internal/logic/public/order/getDiscount.go b/internal/logic/public/order/getDiscount.go index ee7dd4e..db107f3 100644 --- a/internal/logic/public/order/getDiscount.go +++ b/internal/logic/public/order/getDiscount.go @@ -16,13 +16,23 @@ func getDiscount(discounts []types.SubscribeDiscount, inputMonths int64, isNewUs } // isNewUserOnlyForQuantity checks whether the matched discount tier has new_user_only enabled. +// Returns true only when all matching tiers for the given quantity require new-user status +// (i.e. no fallback tier with new_user_only=false exists). When both a new-user-only tier +// and a general tier exist for the same quantity, non-new-users can still purchase via the +// general tier, so this returns false. 
func isNewUserOnlyForQuantity(discounts []types.SubscribeDiscount, inputQuantity int64) bool { - for _, discount := range discounts { - if inputQuantity == discount.Quantity { - return discount.NewUserOnly + hasNewUserOnly := false + hasFallback := false + for _, d := range discounts { + if d.Quantity != inputQuantity { + continue + } + if d.NewUserOnly { + hasNewUserOnly = true + } else { + hasFallback = true } } - - return false + return hasNewUserOnly && !hasFallback } diff --git a/internal/logic/public/order/newUserDiscountEligibility.go b/internal/logic/public/order/newUserDiscountEligibility.go new file mode 100644 index 0000000..2d08da2 --- /dev/null +++ b/internal/logic/public/order/newUserDiscountEligibility.go @@ -0,0 +1,63 @@ +package order + +import ( + "context" + "encoding/json" + "time" + + commonLogic "github.com/perfect-panel/server/internal/logic/common" + "github.com/perfect-panel/server/internal/types" + "gorm.io/gorm" +) + +type newUserDiscountEligibility struct { + Eligibility *commonLogic.NewUserEligibilityContext + Discounts []types.SubscribeDiscount + NewUserOnly bool + WithinWindow bool + EligibleForDiscount bool +} + +func resolveNewUserDiscountEligibility( + ctx context.Context, + db *gorm.DB, + currentUserID int64, + subscribeID int64, + quantity int64, + discountJSON string, +) (*newUserDiscountEligibility, error) { + state := &newUserDiscountEligibility{} + if discountJSON == "" { + return state, nil + } + + _ = json.Unmarshal([]byte(discountJSON), &state.Discounts) + state.NewUserOnly = isNewUserOnlyForQuantity(state.Discounts, quantity) + + eligibility, err := commonLogic.ResolveNewUserEligibility(ctx, db, currentUserID) + if err != nil { + return nil, err + } + state.Eligibility = eligibility + state.WithinWindow = eligibility.IsNewUserAt(time.Now()) + state.EligibleForDiscount = state.WithinWindow + + if state.EligibleForDiscount && state.NewUserOnly { + historyCount, countErr := commonLogic.CountScopedSubscribePurchaseOrders( + 
ctx, + db, + eligibility.ScopeUserIDs, + subscribeID, + []int64{2, 5}, + "", + ) + if countErr != nil { + return nil, countErr + } + if historyCount >= 1 { + state.EligibleForDiscount = false + } + } + + return state, nil +} diff --git a/internal/logic/public/order/preCreateOrderLogic.go b/internal/logic/public/order/preCreateOrderLogic.go index 65036da..fc07fb0 100644 --- a/internal/logic/public/order/preCreateOrderLogic.go +++ b/internal/logic/public/order/preCreateOrderLogic.go @@ -2,9 +2,7 @@ package order import ( "context" - "encoding/json" "math" - "time" commonLogic "github.com/perfect-panel/server/internal/logic/common" "github.com/perfect-panel/server/internal/model/order" @@ -105,35 +103,21 @@ func (l *PreCreateOrderLogic) PreCreateOrder(req *types.PurchaseOrderRequest) (r } } - // check new user only restriction (tier-level only) - var newUserOnly bool - if !isSingleModeRenewal && sub.Discount != "" { - var dis []types.SubscribeDiscount - _ = json.Unmarshal([]byte(sub.Discount), &dis) - newUserOnly = isNewUserOnlyForQuantity(dis, req.Quantity) + newUserDiscount, err := resolveNewUserDiscountEligibility(l.ctx, l.svcCtx.DB, u.Id, targetSubscribeID, req.Quantity, sub.Discount) + if err != nil { + l.Errorw("[PreCreateOrder] Database query error resolving new user eligibility", + logger.Field("error", err.Error()), + logger.Field("user_id", u.Id), + ) + return nil, err } - if newUserOnly { - if time.Since(u.CreatedAt) > 24*time.Hour { - return nil, errors.Wrapf(xerr.NewErrCode(xerr.SubscribeNewUserOnly), "not a new user") - } - var historyCount int64 - if e := l.svcCtx.DB.Model(&order.Order{}). - Where("user_id = ? AND subscribe_id = ? AND type = 1 AND status IN ?", - u.Id, targetSubscribeID, []int64{2, 5}). 
- Count(&historyCount).Error; e != nil { - return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "check new user purchase history error: %v", e.Error()) - } - if historyCount >= 1 { - return nil, errors.Wrapf(xerr.NewErrCode(xerr.SubscribeNewUserOnly), "already purchased new user plan") - } + if !isSingleModeRenewal && newUserDiscount.NewUserOnly && !newUserDiscount.WithinWindow { + return nil, errors.Wrapf(xerr.NewErrCode(xerr.SubscribeNewUserOnly), "not a new user") } var discount float64 = 1 - isNewUserForDiscount := time.Since(u.CreatedAt) <= 24*time.Hour - if sub.Discount != "" { - var dis []types.SubscribeDiscount - _ = json.Unmarshal([]byte(sub.Discount), &dis) - discount = getDiscount(dis, req.Quantity, isNewUserForDiscount) + if len(newUserDiscount.Discounts) > 0 { + discount = getDiscount(newUserDiscount.Discounts, req.Quantity, newUserDiscount.EligibleForDiscount) } price := sub.UnitPrice * req.Quantity diff --git a/internal/logic/public/order/purchaseLogic.go b/internal/logic/public/order/purchaseLogic.go index 0f21651..6224e29 100644 --- a/internal/logic/public/order/purchaseLogic.go +++ b/internal/logic/public/order/purchaseLogic.go @@ -111,6 +111,31 @@ func (l *PurchaseLogic) Purchase(req *types.PurchaseOrderRequest) (resp *types.P } } + // 单订阅模式下,若已有同套餐 pending 订单,关闭旧单后继续创建新单(确保新单参数生效) + if l.svcCtx.Config.Subscribe.SingleModel && orderType == 1 { + var existPending order.Order + if e := l.svcCtx.DB.WithContext(l.ctx). + Model(&order.Order{}). + Where("user_id = ? AND subscribe_id = ? AND status = 1", u.Id, targetSubscribeID). + Order("id DESC"). 
+ First(&existPending).Error; e == nil && existPending.Id > 0 { + l.Infow("[Purchase] single mode pending order exists, closing old order and creating new one", + logger.Field("user_id", u.Id), + logger.Field("old_order_no", existPending.OrderNo), + logger.Field("subscribe_id", targetSubscribeID), + ) + if closeErr := NewCloseOrderLogic(l.ctx, l.svcCtx).CloseOrder(&types.CloseOrderRequest{ + OrderNo: existPending.OrderNo, + }); closeErr != nil { + l.Errorw("[Purchase] close old pending order failed", + logger.Field("error", closeErr.Error()), + logger.Field("old_order_no", existPending.OrderNo), + ) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseUpdateError), "close old pending order error: %v", closeErr.Error()) + } + } + } + // find subscribe plan sub, err := l.svcCtx.SubscribeModel.FindOne(l.ctx, targetSubscribeID) @@ -128,12 +153,18 @@ func (l *PurchaseLogic) Purchase(req *types.PurchaseOrderRequest) (resp *types.P return nil, errors.Wrapf(xerr.NewErrCode(xerr.SubscribeOutOfStock), "subscribe out of stock") } + newUserDiscount, err := resolveNewUserDiscountEligibility(l.ctx, l.svcCtx.DB, u.Id, targetSubscribeID, req.Quantity, sub.Discount) + if err != nil { + l.Errorw("[Purchase] Database query error resolving new user eligibility", + logger.Field("error", err.Error()), + logger.Field("user_id", u.Id), + ) + return nil, err + } + var discount float64 = 1 - isNewUserForDiscount := time.Since(u.CreatedAt) <= 24*time.Hour - if sub.Discount != "" { - var dis []types.SubscribeDiscount - _ = json.Unmarshal([]byte(sub.Discount), &dis) - discount = getDiscount(dis, req.Quantity, isNewUserForDiscount) + if len(newUserDiscount.Discounts) > 0 { + discount = getDiscount(newUserDiscount.Discounts, req.Quantity, newUserDiscount.EligibleForDiscount) } price := sub.UnitPrice * req.Quantity // discount amount @@ -216,36 +247,34 @@ func (l *PurchaseLogic) Purchase(req *types.PurchaseOrderRequest) (resp *types.P } } // query user is new purchase or renewal - isNew := 
false - if orderType == 1 { - isNew, err = l.svcCtx.OrderModel.IsUserEligibleForNewOrder(l.ctx, u.Id) - if err != nil { - l.Errorw("[Purchase] Database query error", logger.Field("error", err.Error()), logger.Field("user_id", u.Id)) - return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find user order error: %v", err.Error()) - } + // 注意:SingleModel 下 orderType 会被路由成 2(续费),但仍需正确判断是否首购以发佣金 + isNew, err := l.svcCtx.OrderModel.IsUserEligibleForNewOrder(l.ctx, u.Id) + if err != nil { + l.Errorw("[Purchase] Database query error", logger.Field("error", err.Error()), logger.Field("user_id", u.Id)) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find user order error: %v", err.Error()) } // create order orderInfo := &order.Order{ UserId: u.Id, SubscriptionUserId: entitlement.EffectiveUserID, ParentId: parentOrderID, - OrderNo: tool.GenerateTradeNo(), - Type: orderType, - Quantity: req.Quantity, - Price: price, - Amount: amount, - Discount: discountAmount, - GiftAmount: deductionAmount, - Coupon: req.Coupon, - CouponDiscount: coupon, - PaymentId: payment.Id, - Method: canonicalOrderMethod(payment.Platform), - FeeAmount: feeAmount, - Status: 1, - IsNew: isNew, - SubscribeId: targetSubscribeID, - SubscribeToken: subscribeToken, - AppAccountToken: uuid.New().String(), + OrderNo: tool.GenerateTradeNo(), + Type: orderType, + Quantity: req.Quantity, + Price: price, + Amount: amount, + Discount: discountAmount, + GiftAmount: deductionAmount, + Coupon: req.Coupon, + CouponDiscount: coupon, + PaymentId: payment.Id, + Method: canonicalOrderMethod(payment.Platform), + FeeAmount: feeAmount, + Status: 1, + IsNew: isNew, + SubscribeId: targetSubscribeID, + SubscribeToken: subscribeToken, + AppAccountToken: uuid.New().String(), } if isSingleModeRenewal { l.Infow("[Purchase] single mode purchase order created as renewal", @@ -277,28 +306,21 @@ func (l *PurchaseLogic) Purchase(req *types.PurchaseOrderRequest) (resp *types.P } } - // check new user only 
restriction inside transaction to prevent race condition (tier-level only) - if orderInfo.Type == 1 { - var txNewUserOnly bool - if sub.Discount != "" { - var dis []types.SubscribeDiscount - _ = json.Unmarshal([]byte(sub.Discount), &dis) - txNewUserOnly = isNewUserOnlyForQuantity(dis, orderInfo.Quantity) + // Re-check new-user-only restriction inside the transaction to prevent race conditions. + if orderInfo.Type == 1 && newUserDiscount.NewUserOnly { + txNewUserDiscount, txErr := resolveNewUserDiscountEligibility( + l.ctx, + db, + u.Id, + targetSubscribeID, + orderInfo.Quantity, + sub.Discount, + ) + if txErr != nil { + return txErr } - if txNewUserOnly { - if time.Since(u.CreatedAt) > 24*time.Hour { - return errors.Wrapf(xerr.NewErrCode(xerr.SubscribeNewUserOnly), "not a new user") - } - var historyCount int64 - if e := db.Model(&order.Order{}). - Where("user_id = ? AND subscribe_id = ? AND type = 1 AND status IN ?", - u.Id, targetSubscribeID, []int{2, 5}). - Count(&historyCount).Error; e != nil { - return errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "check new user purchase history error: %v", e.Error()) - } - if historyCount >= 1 { - return errors.Wrapf(xerr.NewErrCode(xerr.SubscribeNewUserOnly), "already purchased new user plan") - } + if !txNewUserDiscount.WithinWindow { + return errors.Wrapf(xerr.NewErrCode(xerr.SubscribeNewUserOnly), "not a new user") } } diff --git a/internal/logic/public/subscribe/querySubscribeListLogic.go b/internal/logic/public/subscribe/querySubscribeListLogic.go index 2208559..f2c80cf 100644 --- a/internal/logic/public/subscribe/querySubscribeListLogic.go +++ b/internal/logic/public/subscribe/querySubscribeListLogic.go @@ -7,6 +7,7 @@ import ( "github.com/perfect-panel/server/internal/model/subscribe" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/constant" "github.com/perfect-panel/server/pkg/logger" 
"github.com/perfect-panel/server/pkg/tool" "github.com/perfect-panel/server/pkg/xerr" @@ -57,6 +58,18 @@ func (l *QuerySubscribeListLogic) QuerySubscribeList(req *types.QuerySubscribeLi } list[i] = sub } + + // 老版本客户端(无 X-App-Id)去掉每个套餐 discount 的最后一个 + hasAppId, _ := l.ctx.Value(constant.CtxKeyHasAppId).(bool) + if !hasAppId { + for i := range list { + if len(list[i].Discount) > 0 { + list[i].Discount = list[i].Discount[:len(list[i].Discount)-1] + } + } + } + resp.List = list + resp.Total = int64(len(list)) return } diff --git a/internal/logic/public/subscribe/queryUserSubscribeNodeListLogic.go b/internal/logic/public/subscribe/queryUserSubscribeNodeListLogic.go index 2530ae7..87773fe 100644 --- a/internal/logic/public/subscribe/queryUserSubscribeNodeListLogic.go +++ b/internal/logic/public/subscribe/queryUserSubscribeNodeListLogic.go @@ -6,6 +6,7 @@ import ( "time" commonLogic "github.com/perfect-panel/server/internal/logic/common" + "github.com/perfect-panel/server/internal/model/group" "github.com/perfect-panel/server/internal/model/node" "github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/svc" @@ -104,29 +105,39 @@ func fillUserSubscribeInfoEntitlementFields(sub *types.UserSubscribeInfo, entitl func (l *QueryUserSubscribeNodeListLogic) getServers(userSub *user.Subscribe) (userSubscribeNodes []*types.UserSubscribeNodeInfo, err error) { userSubscribeNodes = make([]*types.UserSubscribeNodeInfo, 0) if l.isSubscriptionExpired(userSub) { - return l.createExpiredServers(), nil + return l.createExpiredServers(userSub), nil } - subDetails, err := l.svcCtx.SubscribeModel.FindOne(l.ctx, userSub.SubscribeId) + // Check if group management is enabled + var groupEnabled string + err = l.svcCtx.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). 
+ Select("value").Scan(&groupEnabled).Error + if err != nil { - l.Errorw("[Generate Subscribe]find subscribe details error: %v", logger.Field("error", err.Error())) - return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find subscribe details error: %v", err.Error()) + l.Debugw("[GetServers] Failed to check group enabled", logger.Field("error", err.Error())) + // Continue with tag-based filtering } - nodeIds := tool.StringToInt64Slice(subDetails.Nodes) - tags := normalizeSubscribeNodeTags(subDetails.NodeTags) + isGroupEnabled := (groupEnabled == "true" || groupEnabled == "1") - l.Debugf("[Generate Subscribe]nodes: %v, NodeTags: %v", nodeIds, tags) - - enable := true - - _, nodes, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ - Page: 1, - Size: 1000, - NodeId: nodeIds, - Tag: tags, - Enabled: &enable, // Only get enabled nodes - }) + var nodes []*node.Node + if isGroupEnabled { + // Group mode: use group_ids to filter nodes + nodes, err = l.getNodesByGroup(userSub) + if err != nil { + l.Errorw("[GetServers] Failed to get nodes by group", logger.Field("error", err.Error())) + return nil, err + } + } else { + // Tag mode: use node_ids and tags to filter nodes + nodes, err = l.getNodesByTag(userSub) + if err != nil { + l.Errorw("[GetServers] Failed to get nodes by tag", logger.Field("error", err.Error())) + return nil, err + } + } + // Process nodes and create response if len(nodes) > 0 { var serverMapIds = make(map[int64]*node.Server) for _, n := range nodes { @@ -174,21 +185,241 @@ func (l *QueryUserSubscribeNodeListLogic) getServers(userSub *user.Subscribe) (u } l.Debugf("[Query Subscribe]found servers: %v", len(nodes)) - - if err != nil { - l.Errorw("[Generate Subscribe]find server details error: %v", logger.Field("error", err.Error())) - return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find server details error: %v", err.Error()) - } - logger.Debugf("[Generate Subscribe]found servers: %v", len(nodes)) 
return userSubscribeNodes, nil } +// getNodesByGroup gets nodes based on user subscription node_group_id with priority fallback +func (l *QueryUserSubscribeNodeListLogic) getNodesByGroup(userSub *user.Subscribe) ([]*node.Node, error) { + // 按优先级获取 node_group_id:user_subscribe.node_group_id > subscribe.node_group_id > subscribe.node_group_ids[0] + nodeGroupId := int64(0) + source := "" + var directNodeIds []int64 + + // 优先级1: user_subscribe.node_group_id + if userSub.NodeGroupId != 0 { + nodeGroupId = userSub.NodeGroupId + source = "user_subscribe.node_group_id" + } + + // 获取 subscribe 详情(用于获取 node_group_id 和直接分配的节点) + subDetails, err := l.svcCtx.SubscribeModel.FindOne(l.ctx, userSub.SubscribeId) + if err != nil { + l.Errorw("[GetNodesByGroup] find subscribe details error", logger.Field("error", err.Error())) + return nil, err + } + + // 获取直接分配的节点ID + directNodeIds = tool.StringToInt64Slice(subDetails.Nodes) + l.Debugf("[GetNodesByGroup] direct nodes: %v", directNodeIds) + + // 如果 user_subscribe 没有 node_group_id,从 subscribe 获取 + if nodeGroupId == 0 { + // 优先级2: subscribe.node_group_id + if subDetails.NodeGroupId != 0 { + nodeGroupId = subDetails.NodeGroupId + source = "subscribe.node_group_id" + } else if len(subDetails.NodeGroupIds) > 0 { + // 优先级3: subscribe.node_group_ids[0] + nodeGroupId = subDetails.NodeGroupIds[0] + source = "subscribe.node_group_ids[0]" + } + } + + l.Debugf("[GetNodesByGroup] Using %s: %v", source, nodeGroupId) + + // 查询所有启用的节点 + enable := true + _, allNodes, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ + Page: 0, + Size: 10000, + Enabled: &enable, + }) + if err != nil { + l.Errorw("[GetNodesByGroup] FilterNodeList error", logger.Field("error", err.Error())) + return nil, err + } + + // 过滤节点 + var resultNodes []*node.Node + nodeIdMap := make(map[int64]bool) + + for _, n := range allNodes { + // 1. 
公共节点(node_group_ids 为空),所有人可见 + if len(n.NodeGroupIds) == 0 { + if !nodeIdMap[n.Id] { + resultNodes = append(resultNodes, n) + nodeIdMap[n.Id] = true + } + continue + } + + // 2. 如果有节点组,检查节点是否属于该节点组 + if nodeGroupId != 0 { + for _, gid := range n.NodeGroupIds { + if gid == nodeGroupId { + if !nodeIdMap[n.Id] { + resultNodes = append(resultNodes, n) + nodeIdMap[n.Id] = true + } + break + } + } + } + } + + // 3. 添加直接分配的节点 + if len(directNodeIds) > 0 { + for _, n := range allNodes { + if tool.Contains(directNodeIds, n.Id) && !nodeIdMap[n.Id] { + resultNodes = append(resultNodes, n) + nodeIdMap[n.Id] = true + } + } + } + + l.Debugf("[GetNodesByGroup] Found %d nodes (group=%d, direct=%d)", len(resultNodes), nodeGroupId, len(directNodeIds)) + return resultNodes, nil +} + +// getNodesByTag gets nodes based on subscribe node_ids and tags +func (l *QueryUserSubscribeNodeListLogic) getNodesByTag(userSub *user.Subscribe) ([]*node.Node, error) { + subDetails, err := l.svcCtx.SubscribeModel.FindOne(l.ctx, userSub.SubscribeId) + if err != nil { + l.Errorw("[Generate Subscribe]find subscribe details error: %v", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find subscribe details error: %v", err.Error()) + } + + nodeIds := tool.StringToInt64Slice(subDetails.Nodes) + tags := strings.Split(subDetails.NodeTags, ",") + newTags := make([]string, 0) + for _, t := range tags { + if t != "" { + newTags = append(newTags, t) + } + } + tags = newTags + l.Debugf("[Generate Subscribe]nodes: %v, NodeTags: %v", nodeIds, tags) + + enable := true + _, nodes, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ + Page: 0, + Size: 1000, + NodeId: nodeIds, + Tag: tags, + Enabled: &enable, // Only get enabled nodes + }) + + return nodes, err +} + +// getAllNodes returns all enabled nodes +func (l *QueryUserSubscribeNodeListLogic) getAllNodes() ([]*node.Node, error) { + enable := true + _, nodes, err := 
l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ + Page: 0, + Size: 1000, + Enabled: &enable, + }) + + return nodes, err +} + func (l *QueryUserSubscribeNodeListLogic) isSubscriptionExpired(userSub *user.Subscribe) bool { return userSub.ExpireTime.Unix() < time.Now().Unix() && userSub.ExpireTime.Unix() != 0 } -func (l *QueryUserSubscribeNodeListLogic) createExpiredServers() []*types.UserSubscribeNodeInfo { - return nil +func (l *QueryUserSubscribeNodeListLogic) createExpiredServers(userSub *user.Subscribe) []*types.UserSubscribeNodeInfo { + // 1. 查询过期节点组 + var expiredGroup group.NodeGroup + err := l.svcCtx.DB.Where("is_expired_group = ?", true).First(&expiredGroup).Error + if err != nil { + l.Debugw("no expired node group configured", logger.Field("error", err)) + return nil + } + + // 2. 检查用户是否在过期天数限制内 + expiredDays := int(time.Since(userSub.ExpireTime).Hours() / 24) + if expiredDays > expiredGroup.ExpiredDaysLimit { + l.Debugf("user subscription expired %d days, exceeds limit %d days", expiredDays, expiredGroup.ExpiredDaysLimit) + return nil + } + + // 3. 检查用户已使用流量是否超过限制(仅使用过期期间的流量) + if expiredGroup.MaxTrafficGBExpired != nil && *expiredGroup.MaxTrafficGBExpired > 0 { + usedTrafficGB := (userSub.ExpiredDownload + userSub.ExpiredUpload) / (1024 * 1024 * 1024) + if usedTrafficGB >= *expiredGroup.MaxTrafficGBExpired { + l.Debugf("user expired traffic %d GB, exceeds expired group limit %d GB", usedTrafficGB, *expiredGroup.MaxTrafficGBExpired) + return nil + } + } + + // 4. 查询过期节点组的节点 + enable := true + _, nodes, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ + Page: 0, + Size: 1000, + NodeGroupIds: []int64{expiredGroup.Id}, + Enabled: &enable, + }) + if err != nil { + l.Errorw("failed to query expired group nodes", logger.Field("error", err)) + return nil + } + + if len(nodes) == 0 { + l.Debug("no nodes found in expired group") + return nil + } + + // 5. 
查询服务器信息 + var serverMapIds = make(map[int64]*node.Server) + for _, n := range nodes { + serverMapIds[n.ServerId] = nil + } + var serverIds []int64 + for k := range serverMapIds { + serverIds = append(serverIds, k) + } + + servers, err := l.svcCtx.NodeModel.QueryServerList(l.ctx, serverIds) + if err != nil { + l.Errorw("failed to query servers", logger.Field("error", err)) + return nil + } + + for _, s := range servers { + serverMapIds[s.Id] = s + } + + // 6. 构建节点列表 + userSubscribeNodes := make([]*types.UserSubscribeNodeInfo, 0, len(nodes)) + for _, n := range nodes { + server := serverMapIds[n.ServerId] + if server == nil { + continue + } + userSubscribeNode := &types.UserSubscribeNodeInfo{ + Id: n.Id, + Name: n.Name, + Uuid: userSub.UUID, + Protocol: n.Protocol, + Protocols: server.Protocols, + Port: n.Port, + Address: n.Address, + Tags: strings.Split(n.Tags, ","), + Country: server.Country, + City: server.City, + Latitude: server.Latitude, + Longitude: server.Longitude, + LongitudeCenter: server.LongitudeCenter, + LatitudeCenter: server.LatitudeCenter, + CreatedAt: n.CreatedAt.Unix(), + } + userSubscribeNodes = append(userSubscribeNodes, userSubscribeNode) + } + + l.Infof("returned %d nodes from expired group for user %d (expired %d days)", len(userSubscribeNodes), userSub.UserId, expiredDays) + return userSubscribeNodes } func (l *QueryUserSubscribeNodeListLogic) getFirstHostLine() string { diff --git a/internal/logic/public/user/getUserTrafficStatsLogic.go b/internal/logic/public/user/getUserTrafficStatsLogic.go new file mode 100644 index 0000000..e46cb4b --- /dev/null +++ b/internal/logic/public/user/getUserTrafficStatsLogic.go @@ -0,0 +1,138 @@ +package user + +import ( + "context" + "strconv" + "time" + + "gorm.io/gorm" + + "github.com/perfect-panel/server/internal/model/user" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/constant" + 
"github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/xerr" + "github.com/pkg/errors" +) + +type GetUserTrafficStatsLogic struct { + logger.Logger + ctx context.Context + svcCtx *svc.ServiceContext +} + +// Get User Traffic Statistics +func NewGetUserTrafficStatsLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserTrafficStatsLogic { + return &GetUserTrafficStatsLogic{ + Logger: logger.WithContext(ctx), + ctx: ctx, + svcCtx: svcCtx, + } +} + +func (l *GetUserTrafficStatsLogic) GetUserTrafficStats(req *types.GetUserTrafficStatsRequest) (resp *types.GetUserTrafficStatsResponse, err error) { + // 获取当前用户 + u, ok := l.ctx.Value(constant.CtxKeyUser).(*user.User) + if !ok { + logger.Error("current user is not found in context") + return nil, errors.Wrapf(xerr.NewErrCode(xerr.InvalidAccess), "Invalid Access") + } + + // 将字符串 ID 转换为 int64 + userSubscribeId, err := strconv.ParseInt(req.UserSubscribeId, 10, 64) + if err != nil { + l.Errorw("[GetUserTrafficStats] Invalid User Subscribe ID:", + logger.Field("user_subscribe_id", req.UserSubscribeId), + logger.Field("err", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.InvalidAccess), "Invalid subscription ID") + } + + // 验证订阅归属权 - 直接查询 user_subscribe 表 + var userSubscribe struct { + Id int64 + UserId int64 + } + err = l.svcCtx.DB.WithContext(l.ctx). + Table("user_subscribe"). + Select("id, user_id"). + Where("id = ?", userSubscribeId). 
+ First(&userSubscribe).Error + + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + l.Errorw("[GetUserTrafficStats] User Subscribe Not Found:", + logger.Field("user_subscribe_id", userSubscribeId), + logger.Field("user_id", u.Id)) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.InvalidAccess), "Subscription not found") + } + l.Errorw("[GetUserTrafficStats] Query User Subscribe Error:", logger.Field("err", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "Query User Subscribe Error") + } + + if userSubscribe.UserId != u.Id { + l.Errorw("[GetUserTrafficStats] User Subscribe Access Denied:", + logger.Field("user_subscribe_id", userSubscribeId), + logger.Field("subscribe_user_id", userSubscribe.UserId), + logger.Field("current_user_id", u.Id)) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.InvalidAccess), "Invalid Access") + } + + // 计算时间范围 + now := time.Now() + startDate := now.AddDate(0, 0, -req.Days+1) + startDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.Local) + + // 初始化响应 + resp = &types.GetUserTrafficStatsResponse{ + List: make([]types.DailyTrafficStats, 0, req.Days), + TotalUpload: 0, + TotalDownload: 0, + TotalTraffic: 0, + } + + // 按天查询流量数据 + for i := 0; i < req.Days; i++ { + currentDate := startDate.AddDate(0, 0, i) + dayStart := time.Date(currentDate.Year(), currentDate.Month(), currentDate.Day(), 0, 0, 0, 0, time.Local) + dayEnd := dayStart.Add(24 * time.Hour).Add(-time.Nanosecond) + + // 查询当天流量 + var dailyTraffic struct { + Upload int64 + Download int64 + } + + // 直接使用 model 的查询方法 + err := l.svcCtx.DB.WithContext(l.ctx). + Table("traffic_log"). + Select("COALESCE(SUM(upload), 0) as upload, COALESCE(SUM(download), 0) as download"). + Where("user_id = ? AND subscribe_id = ? AND timestamp BETWEEN ? AND ?", + u.Id, userSubscribeId, dayStart, dayEnd). 
+ Scan(&dailyTraffic).Error + + if err != nil { + l.Errorw("[GetUserTrafficStats] Query Daily Traffic Error:", + logger.Field("date", currentDate.Format("2006-01-02")), + logger.Field("err", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "Query Traffic Error") + } + + // 添加到结果列表 + total := dailyTraffic.Upload + dailyTraffic.Download + resp.List = append(resp.List, types.DailyTrafficStats{ + Date: currentDate.Format("2006-01-02"), + Upload: dailyTraffic.Upload, + Download: dailyTraffic.Download, + Total: total, + }) + + // 累加总计 + resp.TotalUpload += dailyTraffic.Upload + resp.TotalDownload += dailyTraffic.Download + } + + resp.TotalTraffic = resp.TotalUpload + resp.TotalDownload + + return resp, nil +} diff --git a/internal/logic/public/user/queryUserSubscribeLogic.go b/internal/logic/public/user/queryUserSubscribeLogic.go index fbbcaeb..fff0edb 100644 --- a/internal/logic/public/user/queryUserSubscribeLogic.go +++ b/internal/logic/public/user/queryUserSubscribeLogic.go @@ -3,6 +3,7 @@ package user import ( "context" "encoding/json" + "strconv" "time" commonLogic "github.com/perfect-panel/server/internal/logic/common" @@ -58,6 +59,9 @@ func (l *QueryUserSubscribeLogic) QueryUserSubscribe() (resp *types.QueryUserSub var sub types.UserSubscribe tool.DeepCopy(&sub, item) + // 填充 IdStr 字段,避免前端精度丢失 + sub.IdStr = strconv.FormatInt(item.Id, 10) + // 解析Discount字段 避免在续订时只能续订一个月 if item.Subscribe != nil && item.Subscribe.Discount != "" { var discounts []types.SubscribeDiscount diff --git a/internal/logic/server/getServerUserListLogic.go b/internal/logic/server/getServerUserListLogic.go index 70ea51f..817ea70 100644 --- a/internal/logic/server/getServerUserListLogic.go +++ b/internal/logic/server/getServerUserListLogic.go @@ -4,14 +4,18 @@ import ( "encoding/json" "fmt" "strings" + "time" "github.com/gin-gonic/gin" + "github.com/perfect-panel/server/internal/model/group" "github.com/perfect-panel/server/internal/model/node" 
"github.com/perfect-panel/server/internal/model/subscribe" + "github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/svc" "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/speedlimit" "github.com/perfect-panel/server/pkg/tool" "github.com/perfect-panel/server/pkg/uuidx" "github.com/perfect-panel/server/pkg/xerr" @@ -55,6 +59,7 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR return nil, err } + // 查询该服务器上该协议的所有节点(包括属于节点组的节点) _, nodes, err := l.svcCtx.NodeModel.FilterNodeList(l.ctx, &node.FilterNodeParams{ Page: 1, Size: 1000, @@ -65,25 +70,74 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR l.Errorw("FilterNodeList error", logger.Field("error", err.Error())) return nil, err } - var nodeTag []string + + if len(nodes) == 0 { + return &types.GetServerUserListResponse{ + Users: []types.ServerUser{ + { + Id: 1, + UUID: uuidx.NewUUID().String(), + }, + }, + }, nil + } + + // 收集所有唯一的节点组 ID + nodeGroupMap := make(map[int64]bool) // nodeGroupId -> true var nodeIds []int64 + var nodeTags []string + for _, n := range nodes { nodeIds = append(nodeIds, n.Id) if n.Tags != "" { - nodeTag = append(nodeTag, strings.Split(n.Tags, ",")...) + nodeTags = append(nodeTags, strings.Split(n.Tags, ",")...) + } + // 收集节点组 ID + if len(n.NodeGroupIds) > 0 { + for _, gid := range n.NodeGroupIds { + if gid > 0 { + nodeGroupMap[gid] = true + } + } } } - _, subs, err := l.svcCtx.SubscribeModel.FilterList(l.ctx, &subscribe.FilterParams{ - Page: 1, - Size: 9999, - Node: nodeIds, - Tags: nodeTag, - }) - if err != nil { - l.Errorw("QuerySubscribeIdsByServerIdAndServerGroupId error", logger.Field("error", err.Error())) - return nil, err + // 获取所有节点组 ID + nodeGroupIds := make([]int64, 0, len(nodeGroupMap)) + for gid := range nodeGroupMap { + nodeGroupIds = append(nodeGroupIds, gid) } + + // 查询订阅: + // 1. 
如果有节点组,查询匹配这些节点组的订阅 + // 2. 如果没有节点组,查询使用节点 ID 或 tags 的订阅 + var subs []*subscribe.Subscribe + if len(nodeGroupIds) > 0 { + // 节点组模式:查询 node_group_id 或 node_group_ids 匹配的订阅 + _, subs, err = l.svcCtx.SubscribeModel.FilterListByNodeGroups(l.ctx, &subscribe.FilterByNodeGroupsParams{ + Page: 1, + Size: 9999, + NodeGroupIds: nodeGroupIds, + }) + if err != nil { + l.Errorw("FilterListByNodeGroups error", logger.Field("error", err.Error())) + return nil, err + } + } else { + // 传统模式:查询匹配节点 ID 或 tags 的订阅 + nodeTags = tool.RemoveDuplicateElements(nodeTags...) + _, subs, err = l.svcCtx.SubscribeModel.FilterList(l.ctx, &subscribe.FilterParams{ + Page: 1, + Size: 9999, + Node: nodeIds, + Tags: nodeTags, + }) + if err != nil { + l.Errorw("FilterList error", logger.Field("error", err.Error())) + return nil, err + } + } + if len(subs) == 0 { return &types.GetServerUserListResponse{ Users: []types.ServerUser{ @@ -101,14 +155,33 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR return nil, err } for _, datum := range data { + if !l.shouldIncludeServerUser(datum, nodeGroupIds) { + continue + } + + // 计算该用户的实际限速值(考虑按量限速规则) + effectiveSpeedLimit := l.calculateEffectiveSpeedLimit(sub, datum) + users = append(users, types.ServerUser{ Id: datum.Id, UUID: datum.UUID, - SpeedLimit: sub.SpeedLimit, + SpeedLimit: effectiveSpeedLimit, DeviceLimit: sub.DeviceLimit, }) } } + + // 处理过期订阅用户:如果当前节点属于过期节点组,添加符合条件的过期用户 + if len(nodeGroupIds) > 0 { + expiredUsers, expiredSpeedLimit := l.getExpiredUsers(nodeGroupIds) + for i := range expiredUsers { + if expiredSpeedLimit > 0 { + expiredUsers[i].SpeedLimit = expiredSpeedLimit + } + } + users = append(users, expiredUsers...) 
+ } + if len(users) == 0 { users = append(users, types.ServerUser{ Id: 1, @@ -131,3 +204,97 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR } return resp, nil } + +func (l *GetServerUserListLogic) shouldIncludeServerUser(userSub *user.Subscribe, serverNodeGroupIds []int64) bool { + if userSub == nil { + return false + } + + if userSub.ExpireTime.Unix() == 0 || userSub.ExpireTime.After(time.Now()) { + return true + } + + return l.canUseExpiredNodeGroup(userSub, serverNodeGroupIds) +} + +func (l *GetServerUserListLogic) getExpiredUsers(serverNodeGroupIds []int64) ([]types.ServerUser, int64) { + var expiredGroup group.NodeGroup + if err := l.svcCtx.DB.Where("is_expired_group = ?", true).First(&expiredGroup).Error; err != nil { + return nil, 0 + } + + if !tool.Contains(serverNodeGroupIds, expiredGroup.Id) { + return nil, 0 + } + + var expiredSubs []*user.Subscribe + if err := l.svcCtx.DB.Where("status = ?", 3).Find(&expiredSubs).Error; err != nil { + l.Errorw("query expired subscriptions failed", logger.Field("error", err.Error())) + return nil, 0 + } + + users := make([]types.ServerUser, 0) + seen := make(map[int64]bool) + for _, userSub := range expiredSubs { + if !l.checkExpiredUserEligibility(userSub, &expiredGroup) { + continue + } + if seen[userSub.Id] { + continue + } + seen[userSub.Id] = true + users = append(users, types.ServerUser{ + Id: userSub.Id, + UUID: userSub.UUID, + }) + } + + return users, int64(expiredGroup.SpeedLimit) +} + +func (l *GetServerUserListLogic) checkExpiredUserEligibility(userSub *user.Subscribe, expiredGroup *group.NodeGroup) bool { + expiredDays := int(time.Since(userSub.ExpireTime).Hours() / 24) + if expiredDays > expiredGroup.ExpiredDaysLimit { + return false + } + + if expiredGroup.MaxTrafficGBExpired != nil && *expiredGroup.MaxTrafficGBExpired > 0 { + usedTrafficGB := (userSub.ExpiredDownload + userSub.ExpiredUpload) / (1024 * 1024 * 1024) + if usedTrafficGB >= *expiredGroup.MaxTrafficGBExpired { 
+ return false + } + } + + return true +} + +func (l *GetServerUserListLogic) canUseExpiredNodeGroup(userSub *user.Subscribe, serverNodeGroupIds []int64) bool { + var expiredGroup group.NodeGroup + if err := l.svcCtx.DB.Where("is_expired_group = ?", true).First(&expiredGroup).Error; err != nil { + return false + } + + if !tool.Contains(serverNodeGroupIds, expiredGroup.Id) { + return false + } + + expiredDays := int(time.Since(userSub.ExpireTime).Hours() / 24) + if expiredDays > expiredGroup.ExpiredDaysLimit { + return false + } + + if expiredGroup.MaxTrafficGBExpired != nil && *expiredGroup.MaxTrafficGBExpired > 0 { + usedTrafficGB := (userSub.ExpiredDownload + userSub.ExpiredUpload) / (1024 * 1024 * 1024) + if usedTrafficGB >= *expiredGroup.MaxTrafficGBExpired { + return false + } + } + + return true +} + +// calculateEffectiveSpeedLimit 计算用户的实际限速值(考虑按量限速规则) +func (l *GetServerUserListLogic) calculateEffectiveSpeedLimit(sub *subscribe.Subscribe, userSub *user.Subscribe) int64 { + result := speedlimit.Calculate(l.ctx.Request.Context(), l.svcCtx.DB, userSub.UserId, userSub.Id, sub.SpeedLimit, sub.TrafficLimit) + return result.EffectiveSpeed +} diff --git a/internal/logic/subscribe/subscribeLogic.go b/internal/logic/subscribe/subscribeLogic.go index 28f9ecb..7b88160 100644 --- a/internal/logic/subscribe/subscribeLogic.go +++ b/internal/logic/subscribe/subscribeLogic.go @@ -8,6 +8,7 @@ import ( "github.com/perfect-panel/server/adapter" "github.com/perfect-panel/server/internal/model/client" + "github.com/perfect-panel/server/internal/model/group" "github.com/perfect-panel/server/internal/model/log" "github.com/perfect-panel/server/internal/model/node" "github.com/perfect-panel/server/internal/report" @@ -206,6 +207,19 @@ func (l *SubscribeLogic) logSubscribeActivity(subscribeStatus bool, userSub *use func (l *SubscribeLogic) getServers(userSub *user.Subscribe) ([]*node.Node, error) { if l.isSubscriptionExpired(userSub) { + // 尝试获取过期节点组的节点 + expiredNodes, err := 
l.getExpiredGroupNodes(userSub) + if err != nil { + l.Errorw("[Generate Subscribe]get expired group nodes error", logger.Field("error", err.Error())) + return l.createExpiredServers(), nil + } + // 如果有符合条件的过期节点组节点,返回它们 + if len(expiredNodes) > 0 { + l.Debugf("[Generate Subscribe]user %d can use expired node group, nodes count: %d", userSub.UserId, len(expiredNodes)) + return expiredNodes, nil + } + // 否则返回假的过期节点 + l.Debugf("[Generate Subscribe]user %d cannot use expired node group, return fake expired nodes", userSub.UserId) return l.createExpiredServers(), nil } @@ -215,14 +229,133 @@ func (l *SubscribeLogic) getServers(userSub *user.Subscribe) ([]*node.Node, erro return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find subscribe details error: %v", err.Error()) } + // 判断是否使用分组模式 + isGroupMode := l.isGroupEnabled() + + if isGroupMode { + // === 分组模式:使用 node_group_id 获取节点 === + // 按优先级获取 node_group_id:user_subscribe.node_group_id > subscribe.node_group_id > subscribe.node_group_ids[0] + nodeGroupId := int64(0) + source := "" + + // 优先级1: user_subscribe.node_group_id + if userSub.NodeGroupId != 0 { + nodeGroupId = userSub.NodeGroupId + source = "user_subscribe.node_group_id" + } else { + // 优先级2 & 3: 从 subscribe 表获取 + if subDetails.NodeGroupId != 0 { + nodeGroupId = subDetails.NodeGroupId + source = "subscribe.node_group_id" + } else if len(subDetails.NodeGroupIds) > 0 { + // 优先级3: subscribe.node_group_ids[0] + nodeGroupId = subDetails.NodeGroupIds[0] + source = "subscribe.node_group_ids[0]" + } + } + + l.Debugf("[Generate Subscribe]group mode, using %s: %v", source, nodeGroupId) + + // 根据 node_group_id 获取节点 + enable := true + + // 1. 
获取分组节点 + var groupNodes []*node.Node + if nodeGroupId > 0 { + params := &node.FilterNodeParams{ + Page: 0, + Size: 1000, + NodeGroupIds: []int64{nodeGroupId}, + Enabled: &enable, + Preload: true, + } + _, groupNodes, err = l.svc.NodeModel.FilterNodeList(l.ctx.Request.Context(), params) + + if err != nil { + l.Errorw("[Generate Subscribe]filter nodes by group error", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "filter nodes by group error: %v", err.Error()) + } + l.Debugf("[Generate Subscribe]found %d nodes for node_group_id=%d", len(groupNodes), nodeGroupId) + } + + // 2. 获取公共节点(NodeGroupIds 为空的节点) + _, allNodes, err := l.svc.NodeModel.FilterNodeList(l.ctx.Request.Context(), &node.FilterNodeParams{ + Page: 0, + Size: 1000, + Enabled: &enable, + Preload: true, + }) + + if err != nil { + l.Errorw("[Generate Subscribe]filter all nodes error", logger.Field("error", err.Error())) + return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "filter all nodes error: %v", err.Error()) + } + + // 过滤出公共节点 + var publicNodes []*node.Node + for _, n := range allNodes { + if len(n.NodeGroupIds) == 0 { + publicNodes = append(publicNodes, n) + } + } + l.Debugf("[Generate Subscribe]found %d public nodes (node_group_ids is empty)", len(publicNodes)) + + // 3. 
合并分组节点和公共节点 + nodesMap := make(map[int64]*node.Node) + for _, n := range groupNodes { + nodesMap[n.Id] = n + } + for _, n := range publicNodes { + if _, exists := nodesMap[n.Id]; !exists { + nodesMap[n.Id] = n + } + } + + // 转换为切片 + var result []*node.Node + for _, n := range nodesMap { + result = append(result, n) + } + + l.Debugf("[Generate Subscribe]total nodes (group + public): %d (group: %d, public: %d)", len(result), len(groupNodes), len(publicNodes)) + + // 查询节点组信息,获取节点组名称(仅当用户有分组时) + if nodeGroupId > 0 { + type NodeGroupInfo struct { + Id int64 + Name string + } + var nodeGroupInfo NodeGroupInfo + err = l.svc.DB.Table("node_group").Select("id, name").Where("id = ?", nodeGroupId).First(&nodeGroupInfo).Error + if err != nil { + l.Infow("[Generate Subscribe]node group not found", logger.Field("nodeGroupId", nodeGroupId), logger.Field("error", err.Error())) + } + + // 如果节点组信息存在,为没有 tag 的分组节点设置节点组名称为 tag + if nodeGroupInfo.Id != 0 && nodeGroupInfo.Name != "" { + for _, n := range result { + // 只为分组节点设置 tag,公共节点不设置 + if n.Tags == "" && len(n.NodeGroupIds) > 0 { + n.Tags = nodeGroupInfo.Name + l.Debugf("[Generate Subscribe]set node_group name as tag for node %d: %s", n.Id, nodeGroupInfo.Name) + } + } + } + } + + return result, nil + } + + // === 标签模式:使用 node_ids 和 tags 获取节点 === nodeIds := tool.StringToInt64Slice(subDetails.Nodes) tags := tool.RemoveStringElement(strings.Split(subDetails.NodeTags, ","), "") - l.Debugf("[Generate Subscribe]nodes: %v, NodeTags: %v", len(nodeIds), len(tags)) + l.Debugf("[Generate Subscribe]tag mode, nodes: %v, NodeTags: %v", len(nodeIds), len(tags)) if len(nodeIds) == 0 && len(tags) == 0 { - logger.Infow("[Generate Subscribe]no subscribe nodes") + logger.Infow("[Generate Subscribe]no subscribe nodes configured") return []*node.Node{}, nil } + enable := true var nodes []*node.Node _, nodes, err = l.svc.NodeModel.FilterNodeList(l.ctx.Request.Context(), &node.FilterNodeParams{ @@ -231,16 +364,15 @@ func (l *SubscribeLogic) 
getServers(userSub *user.Subscribe) ([]*node.Node, erro NodeId: nodeIds, Tag: tool.RemoveDuplicateElements(tags...), Preload: true, - Enabled: &enable, // Only get enabled nodes + Enabled: &enable, }) - l.Debugf("[Query Subscribe]found servers: %v", len(nodes)) - if err != nil { l.Errorw("[Generate Subscribe]find server details error: %v", logger.Field("error", err.Error())) return nil, errors.Wrapf(xerr.NewErrCode(xerr.DatabaseQueryError), "find server details error: %v", err.Error()) } - logger.Debugf("[Generate Subscribe]found servers: %v", len(nodes)) + + l.Debugf("[Generate Subscribe]found %d nodes in tag mode", len(nodes)) return nodes, nil } @@ -290,3 +422,66 @@ func (l *SubscribeLogic) getFirstHostLine() string { } return host } + +// isGroupEnabled 判断分组功能是否启用 +func (l *SubscribeLogic) isGroupEnabled() bool { + var value string + err := l.svc.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). + Select("value"). + Scan(&value).Error + if err != nil { + l.Debugf("[SubscribeLogic]check group enabled failed: %v", err) + return false + } + return value == "true" || value == "1" +} + +// getExpiredGroupNodes 获取过期节点组的节点 +func (l *SubscribeLogic) getExpiredGroupNodes(userSub *user.Subscribe) ([]*node.Node, error) { + // 1. 查询过期节点组 + var expiredGroup group.NodeGroup + err := l.svc.DB.Where("is_expired_group = ?", true).First(&expiredGroup).Error + if err != nil { + l.Debugw("[SubscribeLogic]no expired node group configured", logger.Field("error", err.Error())) + return nil, err + } + + // 2. 检查用户是否在过期天数限制内 + expiredDays := int(time.Since(userSub.ExpireTime).Hours() / 24) + if expiredDays > expiredGroup.ExpiredDaysLimit { + l.Debugf("[SubscribeLogic]user %d subscription expired %d days, exceeds limit %d days", userSub.UserId, expiredDays, expiredGroup.ExpiredDaysLimit) + return nil, nil + } + + // 3. 
检查用户已使用流量是否超过限制(仅使用过期期间的流量) + if expiredGroup.MaxTrafficGBExpired != nil && *expiredGroup.MaxTrafficGBExpired > 0 { + usedTrafficGB := (userSub.ExpiredDownload + userSub.ExpiredUpload) / (1024 * 1024 * 1024) + if usedTrafficGB >= *expiredGroup.MaxTrafficGBExpired { + l.Debugf("[SubscribeLogic]user %d expired traffic %d GB, exceeds expired group limit %d GB", userSub.UserId, usedTrafficGB, *expiredGroup.MaxTrafficGBExpired) + return nil, nil + } + } + + // 4. 查询过期节点组的节点 + enable := true + _, nodes, err := l.svc.NodeModel.FilterNodeList(l.ctx.Request.Context(), &node.FilterNodeParams{ + Page: 0, + Size: 1000, + NodeGroupIds: []int64{expiredGroup.Id}, + Enabled: &enable, + Preload: true, + }) + if err != nil { + l.Errorw("[SubscribeLogic]failed to query expired group nodes", logger.Field("error", err.Error())) + return nil, err + } + + if len(nodes) == 0 { + l.Debug("[SubscribeLogic]no nodes found in expired group") + return nil, nil + } + + l.Infof("[SubscribeLogic]returned %d nodes from expired group for user %d (expired %d days)", len(nodes), userSub.UserId, expiredDays) + return nodes, nil +} diff --git a/internal/middleware/apiVersionMiddleware.go b/internal/middleware/apiVersionMiddleware.go index 845a54d..7362772 100644 --- a/internal/middleware/apiVersionMiddleware.go +++ b/internal/middleware/apiVersionMiddleware.go @@ -14,9 +14,11 @@ func ApiVersionMiddleware(_ *svc.ServiceContext) func(c *gin.Context) { return func(c *gin.Context) { rawVersion := strings.TrimSpace(c.GetHeader("api-header")) useLatest := apiversion.UseLatest(rawVersion, apiversion.DefaultThreshold) + hasAppId := strings.TrimSpace(c.GetHeader("X-App-Id")) != "" ctx := context.WithValue(c.Request.Context(), constant.CtxKeyAPIVersionUseLatest, useLatest) ctx = context.WithValue(ctx, constant.CtxKeyAPIHeaderRaw, rawVersion) + ctx = context.WithValue(ctx, constant.CtxKeyHasAppId, hasAppId) c.Request = c.Request.WithContext(ctx) c.Set("api_header", rawVersion) diff --git 
a/internal/middleware/signatureMiddleware.go b/internal/middleware/signatureMiddleware.go index 516fbf6..2b049a0 100644 --- a/internal/middleware/signatureMiddleware.go +++ b/internal/middleware/signatureMiddleware.go @@ -24,6 +24,7 @@ var ( "/v1/iap/notifications", "/v1/telegram/webhook", "/v1/subscribe/config", + "/v1/common/log/report", } ) diff --git a/internal/model/group/history.go b/internal/model/group/history.go new file mode 100644 index 0000000..5ab1b0b --- /dev/null +++ b/internal/model/group/history.go @@ -0,0 +1,54 @@ +package group + +import ( + "time" + + "gorm.io/gorm" +) + +// GroupHistory 分组历史记录模型 +type GroupHistory struct { + Id int64 `gorm:"primaryKey"` + GroupMode string `gorm:"type:varchar(50);not null;index:idx_group_mode;comment:Group Mode: average/subscribe/traffic"` + TriggerType string `gorm:"type:varchar(50);not null;index:idx_trigger_type;comment:Trigger Type: manual/auto/schedule"` + State string `gorm:"type:varchar(50);not null;index:idx_state;comment:State: pending/running/completed/failed"` + TotalUsers int `gorm:"default:0;not null;comment:Total Users"` + SuccessCount int `gorm:"default:0;not null;comment:Success Count"` + FailedCount int `gorm:"default:0;not null;comment:Failed Count"` + StartTime *time.Time `gorm:"comment:Start Time"` + EndTime *time.Time `gorm:"comment:End Time"` + Operator string `gorm:"type:varchar(100);comment:Operator"` + ErrorMessage string `gorm:"type:TEXT;comment:Error Message"` + CreatedAt time.Time `gorm:"<-:create;index:idx_created_at;comment:Create Time"` +} + +// TableName 指定表名 +func (*GroupHistory) TableName() string { + return "group_history" +} + +// BeforeCreate GORM hook - 创建前回调 +func (gh *GroupHistory) BeforeCreate(tx *gorm.DB) error { + return nil +} + +// GroupHistoryDetail 分组历史详情模型 +type GroupHistoryDetail struct { + Id int64 `gorm:"primaryKey"` + HistoryId int64 `gorm:"not null;index:idx_history_id;comment:History ID"` + NodeGroupId int64 `gorm:"not 
null;index:idx_node_group_id;comment:Node Group ID"` + UserCount int `gorm:"default:0;not null;comment:User Count"` + NodeCount int `gorm:"default:0;not null;comment:Node Count"` + UserData string `gorm:"type:text;comment:User data JSON (id and email/phone)"` + CreatedAt time.Time `gorm:"<-:create;comment:Create Time"` +} + +// TableName 指定表名 +func (*GroupHistoryDetail) TableName() string { + return "group_history_detail" +} + +// BeforeCreate GORM hook - 创建前回调 +func (ghd *GroupHistoryDetail) BeforeCreate(tx *gorm.DB) error { + return nil +} diff --git a/internal/model/group/model.go b/internal/model/group/model.go new file mode 100644 index 0000000..77d5e1b --- /dev/null +++ b/internal/model/group/model.go @@ -0,0 +1,14 @@ +package group + +import ( + "gorm.io/gorm" +) + +// AutoMigrate 自动迁移数据库表 +func AutoMigrate(db *gorm.DB) error { + return db.AutoMigrate( + &NodeGroup{}, + &GroupHistory{}, + &GroupHistoryDetail{}, + ) +} diff --git a/internal/model/group/node_group.go b/internal/model/group/node_group.go new file mode 100644 index 0000000..a2fe3ee --- /dev/null +++ b/internal/model/group/node_group.go @@ -0,0 +1,34 @@ +package group + +import ( + "time" + + "gorm.io/gorm" +) + +// NodeGroup 节点组模型 +type NodeGroup struct { + Id int64 `gorm:"primaryKey"` + Name string `gorm:"type:varchar(255);not null;comment:Name"` + Description string `gorm:"type:varchar(500);comment:Description"` + Sort int `gorm:"default:0;index:idx_sort;comment:Sort Order"` + ForCalculation *bool `gorm:"default:true;not null;comment:For Calculation: whether this node group participates in grouping calculation"` + IsExpiredGroup *bool `gorm:"default:false;not null;index:idx_is_expired_group;comment:Is Expired Group"` + ExpiredDaysLimit int `gorm:"default:7;not null;comment:Expired days limit (days)"` + MaxTrafficGBExpired *int64 `gorm:"default:0;comment:Max traffic for expired users (GB)"` + SpeedLimit int `gorm:"default:0;not null;comment:Speed limit (KB/s)"` + MinTrafficGB *int64 
`gorm:"default:0;comment:Minimum Traffic (GB) for this node group"` + MaxTrafficGB *int64 `gorm:"default:0;comment:Maximum Traffic (GB) for this node group"` + CreatedAt time.Time `gorm:"<-:create;comment:Create Time"` + UpdatedAt time.Time `gorm:"comment:Update Time"` +} + +// TableName 指定表名 +func (*NodeGroup) TableName() string { + return "node_group" +} + +// BeforeCreate GORM hook - 创建前回调 +func (ng *NodeGroup) BeforeCreate(tx *gorm.DB) error { + return nil +} diff --git a/internal/model/node/model.go b/internal/model/node/model.go index bede293..e6961c1 100644 --- a/internal/model/node/model.go +++ b/internal/model/node/model.go @@ -34,15 +34,16 @@ type FilterParams struct { } type FilterNodeParams struct { - Page int // Page Number - Size int // Page Size - NodeId []int64 // Node IDs - ServerId []int64 // Server IDs - Tag []string // Tags - Search string // Search Address or Name - Protocol string // Protocol - Preload bool // Preload Server - Enabled *bool // Enabled + Page int // Page Number + Size int // Page Size + NodeId []int64 // Node IDs + ServerId []int64 // Server IDs + Tag []string // Tags + NodeGroupIds []int64 // Node Group IDs + Search string // Search Address or Name + Protocol string // Protocol + Preload bool // Preload Server + Enabled *bool // Enabled } // FilterServerList Filter Server List @@ -97,6 +98,18 @@ func (m *customServerModel) FilterNodeList(ctx context.Context, params *FilterNo if len(params.Tag) > 0 { query = query.Scopes(InSet("tags", params.Tag)) } + if len(params.NodeGroupIds) > 0 { + // Filter by node_group_ids using JSON_CONTAINS for each group ID + // Multiple group IDs: node must belong to at least one of the groups + var conditions []string + for _, gid := range params.NodeGroupIds { + conditions = append(conditions, fmt.Sprintf("JSON_CONTAINS(node_group_ids, '%d')", gid)) + } + if len(conditions) > 0 { + query = query.Where("(" + strings.Join(conditions, " OR ") + ")") + } + } + // If no NodeGroupIds specified, return 
all nodes (including public nodes) if params.Protocol != "" { query = query.Where("protocol = ?", params.Protocol) } diff --git a/internal/model/node/node.go b/internal/model/node/node.go index 89d665d..787ea32 100644 --- a/internal/model/node/node.go +++ b/internal/model/node/node.go @@ -1,25 +1,73 @@ package node import ( + "database/sql/driver" + "encoding/json" "time" "github.com/perfect-panel/server/pkg/logger" "gorm.io/gorm" ) +// JSONInt64Slice is a custom type for handling []int64 as JSON in database +type JSONInt64Slice []int64 + +// Scan implements sql.Scanner interface +func (j *JSONInt64Slice) Scan(value interface{}) error { + if value == nil { + *j = []int64{} + return nil + } + + // Handle []byte + bytes, ok := value.([]byte) + if !ok { + // Try to handle string + str, ok := value.(string) + if !ok { + *j = []int64{} + return nil + } + bytes = []byte(str) + } + + if len(bytes) == 0 { + *j = []int64{} + return nil + } + + // Check if it's a JSON array + if bytes[0] != '[' { + // Not a JSON array, return empty slice + *j = []int64{} + return nil + } + + return json.Unmarshal(bytes, j) +} + +// Value implements driver.Valuer interface +func (j JSONInt64Slice) Value() (driver.Value, error) { + if len(j) == 0 { + return "[]", nil + } + return json.Marshal(j) +} + type Node struct { - Id int64 `gorm:"primary_key"` - Name string `gorm:"type:varchar(100);not null;default:'';comment:Node Name"` - Tags string `gorm:"type:varchar(255);not null;default:'';comment:Tags"` - Port uint16 `gorm:"not null;default:0;comment:Connect Port"` - Address string `gorm:"type:varchar(255);not null;default:'';comment:Connect Address"` - ServerId int64 `gorm:"not null;default:0;comment:Server ID"` - Server *Server `gorm:"foreignKey:ServerId;references:Id"` - Protocol string `gorm:"type:varchar(100);not null;default:'';comment:Protocol"` - Enabled *bool `gorm:"type:boolean;not null;default:true;comment:Enabled"` - Sort int `gorm:"uniqueIndex;not null;default:0;comment:Sort"` - 
CreatedAt time.Time `gorm:"<-:create;comment:Creation Time"` - UpdatedAt time.Time `gorm:"comment:Update Time"` + Id int64 `gorm:"primary_key"` + Name string `gorm:"type:varchar(100);not null;default:'';comment:Node Name"` + Tags string `gorm:"type:varchar(255);not null;default:'';comment:Tags"` + Port uint16 `gorm:"not null;default:0;comment:Connect Port"` + Address string `gorm:"type:varchar(255);not null;default:'';comment:Connect Address"` + ServerId int64 `gorm:"not null;default:0;comment:Server ID"` + Server *Server `gorm:"foreignKey:ServerId;references:Id"` + Protocol string `gorm:"type:varchar(100);not null;default:'';comment:Protocol"` + Enabled *bool `gorm:"type:boolean;not null;default:true;comment:Enabled"` + Sort int `gorm:"uniqueIndex;not null;default:0;comment:Sort"` + NodeGroupIds JSONInt64Slice `gorm:"type:json;comment:Node Group IDs (JSON array, multiple groups)"` + CreatedAt time.Time `gorm:"<-:create;comment:Creation Time"` + UpdatedAt time.Time `gorm:"comment:Update Time"` } func (n *Node) TableName() string { diff --git a/internal/model/subscribe/model.go b/internal/model/subscribe/model.go index 9942046..9764a27 100644 --- a/internal/model/subscribe/model.go +++ b/internal/model/subscribe/model.go @@ -2,6 +2,8 @@ package subscribe import ( "context" + "fmt" + "strings" "github.com/perfect-panel/server/pkg/tool" "github.com/redis/go-redis/v9" @@ -19,6 +21,13 @@ type FilterParams struct { Language string // Language DefaultLanguage bool // Default Subscribe Language Data Search string // Search Keywords + NodeGroupId *int64 // Node Group ID +} + +type FilterByNodeGroupsParams struct { + Page int // Page Number + Size int // Page Size + NodeGroupIds []int64 // Node Group IDs (multiple) } func (p *FilterParams) Normalize() { @@ -32,6 +41,7 @@ func (p *FilterParams) Normalize() { type customSubscribeLogicModel interface { FilterList(ctx context.Context, params *FilterParams) (int64, []*Subscribe, error) + FilterListByNodeGroups(ctx 
context.Context, params *FilterByNodeGroupsParams) (int64, []*Subscribe, error) ClearCache(ctx context.Context, id ...int64) error QuerySubscribeMinSortByIds(ctx context.Context, ids []int64) (int64, error) } @@ -102,6 +112,12 @@ func (m *customSubscribeModel) FilterList(ctx context.Context, params *FilterPar if len(params.Tags) > 0 { query = query.Scopes(InSet("node_tags", params.Tags)) } + if params.NodeGroupId != nil { + // Filter by node_group_ids using JSON_CONTAINS + // JSON_CONTAINS requires a JSON string, not a bare integer + jsonVal := fmt.Sprintf("%d", *params.NodeGroupId) + query = query.Where("(node_group_ids IS NOT NULL AND JSON_CONTAINS(node_group_ids, ?))", jsonVal) + } if lang != "" { query = query.Where("language = ?", lang) } else if params.DefaultLanguage { @@ -154,3 +170,69 @@ func InSet(field string, values []string) func(db *gorm.DB) *gorm.DB { return query } } + +// FilterListByNodeGroups Filter subscribes by node groups +// Match if subscribe's node_group_id OR node_group_ids contains any of the provided node group IDs +func (m *customSubscribeModel) FilterListByNodeGroups(ctx context.Context, params *FilterByNodeGroupsParams) (int64, []*Subscribe, error) { + if params == nil { + params = &FilterByNodeGroupsParams{ + Page: 1, + Size: 10, + } + } + if params.Page <= 0 { + params.Page = 1 + } + if params.Size <= 0 { + params.Size = 10 + } + + var list []*Subscribe + var total int64 + + err := m.QueryNoCacheCtx(ctx, &list, func(conn *gorm.DB, v interface{}) error { + query := conn.Model(&Subscribe{}) + + // Filter by node groups: match if node_group_id or node_group_ids contains any of the provided IDs + if len(params.NodeGroupIds) > 0 { + var conditions []string + var args []interface{} + + // Condition 1: node_group_id IN (...) + placeholders := make([]string, len(params.NodeGroupIds)) + for i, id := range params.NodeGroupIds { + placeholders[i] = "?" 
+ args = append(args, id) + } + conditions = append(conditions, "node_group_id IN ("+strings.Join(placeholders, ",")+")") + + // Condition 2: JSON_CONTAINS(node_group_ids, id) for each id + for _, id := range params.NodeGroupIds { + // JSON_CONTAINS requires a JSON string value, not a bare integer + jsonVal := fmt.Sprintf("%d", id) + conditions = append(conditions, "node_group_ids IS NOT NULL AND JSON_CONTAINS(node_group_ids, ?)") + args = append(args, jsonVal) + } + + // Combine with OR: (node_group_id IN (...) OR JSON_CONTAINS(node_group_ids, id1) OR ...) + query = query.Where("("+strings.Join(conditions, " OR ")+")", args...) + } + + // Count total + if err := query.Count(&total).Error; err != nil { + return err + } + + // Find with pagination + return query.Order("sort ASC"). + Limit(params.Size). + Offset((params.Page - 1) * params.Size). + Find(v).Error + }) + + if err != nil { + return 0, nil, err + } + + return total, list, nil +} diff --git a/internal/model/subscribe/subscribe.go b/internal/model/subscribe/subscribe.go index cf363af..7ac9da4 100644 --- a/internal/model/subscribe/subscribe.go +++ b/internal/model/subscribe/subscribe.go @@ -1,11 +1,58 @@ package subscribe import ( + "database/sql/driver" + "encoding/json" "time" "gorm.io/gorm" ) +// JSONInt64Slice is a custom type for handling []int64 as JSON in database +type JSONInt64Slice []int64 + +// Scan implements sql.Scanner interface +func (j *JSONInt64Slice) Scan(value interface{}) error { + if value == nil { + *j = []int64{} + return nil + } + + // Handle []byte + bytes, ok := value.([]byte) + if !ok { + // Try to handle string + str, ok := value.(string) + if !ok { + *j = []int64{} + return nil + } + bytes = []byte(str) + } + + if len(bytes) == 0 { + *j = []int64{} + return nil + } + + // Check if it's a JSON array + if bytes[0] != '[' { + // Not a JSON array, return empty slice + *j = []int64{} + return nil + } + + return json.Unmarshal(bytes, j) +} + +// Value implements driver.Valuer interface 
+func (j JSONInt64Slice) Value() (driver.Value, error) { + if len(j) == 0 { + return "[]", nil + } + return json.Marshal(j) +} + type Subscribe struct { Id int64 `gorm:"primaryKey"` Name string `gorm:"type:varchar(255);not null;default:'';comment:Subscribe Name"` @@ -21,9 +68,12 @@ type Subscribe struct { DeviceLimit int64 `gorm:"type:int;not null;default:0;comment:Device Limit"` Quota int64 `gorm:"type:int;not null;default:0;comment:Quota"` NewUserOnly *bool `gorm:"type:tinyint(1);default:0;comment:New user only: allow purchase within 24h of registration, once per user"` - Nodes string `gorm:"type:varchar(255);comment:Node Ids"` - NodeTags string `gorm:"type:varchar(255);comment:Node Tags"` - Show *bool `gorm:"type:tinyint(1);not null;default:0;comment:Show portal page"` + Nodes string `gorm:"type:varchar(255);comment:Node Ids"` + NodeTags string `gorm:"type:varchar(255);comment:Node Tags"` + NodeGroupIds JSONInt64Slice `gorm:"type:json;comment:Node Group IDs (JSON array, multiple groups)"` + NodeGroupId int64 `gorm:"default:0;index:idx_node_group_id;comment:Default Node Group ID (single ID)"` + TrafficLimit string `gorm:"type:text;comment:Traffic Limit Rules"` + Show *bool `gorm:"type:tinyint(1);not null;default:0;comment:Show portal page"` Sell *bool `gorm:"type:tinyint(1);not null;default:0;comment:Sell"` Sort int64 `gorm:"type:int;not null;default:0;comment:Sort"` DeductionRatio int64 `gorm:"type:int;default:0;comment:Deduction Ratio"` diff --git a/internal/model/user/device.go b/internal/model/user/device.go index a318e3e..3ad06aa 100644 --- a/internal/model/user/device.go +++ b/internal/model/user/device.go @@ -41,7 +41,7 @@ func (m *customUserModel) QueryDevicePageList(ctx context.Context, userId, subsc var list []*Device var total int64 err := m.QueryNoCacheCtx(ctx, &list, func(conn *gorm.DB, v interface{}) error { - return conn.Model(&Device{}).Where("`user_id` = ? 
and `subscribe_id` = ?", userId, subscribeId).Count(&total).Limit(size).Offset((page - 1) * size).Find(&list).Error + return conn.Model(&Device{}).Where("`user_id` = ?", userId).Count(&total).Limit(size).Offset((page - 1) * size).Find(&list).Error }) return list, total, err } diff --git a/internal/model/user/model.go b/internal/model/user/model.go index d457520..ecaac6e 100644 --- a/internal/model/user/model.go +++ b/internal/model/user/model.go @@ -29,6 +29,7 @@ type SubscribeDetails struct { OrderId int64 `gorm:"index:idx_order_id;not null;comment:Order ID"` SubscribeId int64 `gorm:"index:idx_subscribe_id;not null;comment:Subscription ID"` Subscribe *subscribe.Subscribe `gorm:"foreignKey:SubscribeId;references:Id"` + NodeGroupId int64 `gorm:"index:idx_node_group_id;not null;default:0;comment:Node Group ID (single ID)"` StartTime time.Time `gorm:"default:CURRENT_TIMESTAMP(3);not null;comment:Subscription Start Time"` ExpireTime time.Time `gorm:"default:NULL;comment:Subscription Expire Time"` FinishedAt *time.Time `gorm:"default:NULL;comment:Finished Time"` @@ -89,7 +90,7 @@ type customUserLogicModel interface { FindOneSubscribeDetailsById(ctx context.Context, id int64) (*SubscribeDetails, error) FindOneUserSubscribe(ctx context.Context, id int64) (*SubscribeDetails, error) FindUsersSubscribeBySubscribeId(ctx context.Context, subscribeId int64) ([]*Subscribe, error) - UpdateUserSubscribeWithTraffic(ctx context.Context, id, download, upload int64, tx ...*gorm.DB) error + UpdateUserSubscribeWithTraffic(ctx context.Context, id, download, upload int64, isExpired bool, tx ...*gorm.DB) error QueryResisterUserTotalByDate(ctx context.Context, date time.Time) (int64, error) QueryResisterUserTotalByMonthly(ctx context.Context, date time.Time) (int64, error) QueryResisterUserTotal(ctx context.Context) (int64, error) @@ -276,7 +277,7 @@ func (m *customUserModel) BatchDeleteUser(ctx context.Context, ids []int64, tx . }, m.batchGetCacheKeys(users...)...) 
} -func (m *customUserModel) UpdateUserSubscribeWithTraffic(ctx context.Context, id, download, upload int64, tx ...*gorm.DB) error { +func (m *customUserModel) UpdateUserSubscribeWithTraffic(ctx context.Context, id, download, upload int64, isExpired bool, tx ...*gorm.DB) error { sub, err := m.FindOneSubscribe(ctx, id) if err != nil { return err @@ -293,10 +294,21 @@ func (m *customUserModel) UpdateUserSubscribeWithTraffic(ctx context.Context, id if len(tx) > 0 { conn = tx[0] } - return conn.Model(&Subscribe{}).Where("id = ?", id).Updates(map[string]interface{}{ - "download": gorm.Expr("download + ?", download), - "upload": gorm.Expr("upload + ?", upload), - }).Error + + // 根据订阅状态更新对应的流量字段 + if isExpired { + // 过期期间,更新过期流量字段 + return conn.Model(&Subscribe{}).Where("id = ?", id).Updates(map[string]interface{}{ + "expired_download": gorm.Expr("expired_download + ?", download), + "expired_upload": gorm.Expr("expired_upload + ?", upload), + }).Error + } else { + // 正常期间,更新正常流量字段 + return conn.Model(&Subscribe{}).Where("id = ?", id).Updates(map[string]interface{}{ + "download": gorm.Expr("download + ?", download), + "upload": gorm.Expr("upload + ?", upload), + }).Error + } }) } diff --git a/internal/model/user/subscribe.go b/internal/model/user/subscribe.go index 7f798e4..854e9ea 100644 --- a/internal/model/user/subscribe.go +++ b/internal/model/user/subscribe.go @@ -67,7 +67,7 @@ func (m *defaultUserModel) FindSingleModeAnchorSubscribe(ctx context.Context, us var data Subscribe err := m.QueryNoCacheCtx(ctx, &data, func(conn *gorm.DB, _ interface{}) error { return conn.Model(&Subscribe{}). - Where("user_id = ? AND order_id > 0 AND token != '' AND `status` IN ?", userId, []int64{0, 1, 2, 3, 5}). + Where("user_id = ? AND token != '' AND (order_id > 0 OR token LIKE 'iap:%') AND `status` IN ?", userId, []int64{0, 1, 2, 3, 5}). Order("expire_time DESC"). Order("updated_at DESC"). Order("id DESC"). 
diff --git a/internal/model/user/user.go b/internal/model/user/user.go index 5ac50d9..425af75 100644 --- a/internal/model/user/user.go +++ b/internal/model/user/user.go @@ -1,11 +1,58 @@ package user import ( + "database/sql/driver" + "encoding/json" "time" "gorm.io/gorm" ) +// JSONInt64Slice is a custom type for handling []int64 as JSON in database +type JSONInt64Slice []int64 + +// Scan implements sql.Scanner interface +func (j *JSONInt64Slice) Scan(value interface{}) error { + if value == nil { + *j = []int64{} + return nil + } + + // Handle []byte + bytes, ok := value.([]byte) + if !ok { + // Try to handle string + str, ok := value.(string) + if !ok { + *j = []int64{} + return nil + } + bytes = []byte(str) + } + + if len(bytes) == 0 { + *j = []int64{} + return nil + } + + // Check if it's a JSON array + if bytes[0] != '[' { + // Not a JSON array, return empty slice + *j = []int64{} + return nil + } + + return json.Unmarshal(bytes, j) +} + +// Value implements driver.Valuer interface +func (j JSONInt64Slice) Value() (driver.Value, error) { + if len(j) == 0 { + return "[]", nil + } + return json.Marshal(j) +} + type User struct { Id int64 `gorm:"primaryKey"` Password string `gorm:"type:varchar(100);not null;comment:User Password"` @@ -41,23 +88,27 @@ func (*User) TableName() string { } type Subscribe struct { - Id int64 `gorm:"primaryKey"` - UserId int64 `gorm:"index:idx_user_id;not null;comment:User ID"` - User User `gorm:"foreignKey:UserId;references:Id"` - OrderId int64 `gorm:"index:idx_order_id;not null;comment:Order ID"` - SubscribeId int64 `gorm:"index:idx_subscribe_id;not null;comment:Subscription ID"` - StartTime time.Time `gorm:"default:CURRENT_TIMESTAMP(3);not null;comment:Subscription Start Time"` - ExpireTime time.Time `gorm:"default:NULL;comment:Subscription Expire Time"` - FinishedAt *time.Time `gorm:"default:NULL;comment:Finished Time"` - Traffic int64 `gorm:"default:0;comment:Traffic"` - Download int64 `gorm:"default:0;comment:Download Traffic"` - 
Upload int64 `gorm:"default:0;comment:Upload Traffic"` - Token string `gorm:"index:idx_token;unique;type:varchar(255);default:'';comment:Token"` - UUID string `gorm:"type:varchar(255);unique;index:idx_uuid;default:'';comment:UUID"` - Status uint8 `gorm:"type:tinyint(1);default:0;comment:Subscription Status: 0: Pending 1: Active 2: Finished 3: Expired 4: Deducted 5: stopped"` - Note string `gorm:"type:varchar(500);default:'';comment:User note for subscription"` - CreatedAt time.Time `gorm:"<-:create;comment:Creation Time"` - UpdatedAt time.Time `gorm:"comment:Update Time"` + Id int64 `gorm:"primaryKey"` + UserId int64 `gorm:"index:idx_user_id;not null;comment:User ID"` + User User `gorm:"foreignKey:UserId;references:Id"` + OrderId int64 `gorm:"index:idx_order_id;not null;comment:Order ID"` + SubscribeId int64 `gorm:"index:idx_subscribe_id;not null;comment:Subscription ID"` + NodeGroupId int64 `gorm:"index:idx_node_group_id;not null;default:0;comment:Node Group ID (single ID)"` + GroupLocked *bool `gorm:"type:tinyint(1);not null;default:0;comment:Group Locked"` + StartTime time.Time `gorm:"default:CURRENT_TIMESTAMP(3);not null;comment:Subscription Start Time"` + ExpireTime time.Time `gorm:"default:NULL;comment:Subscription Expire Time"` + FinishedAt *time.Time `gorm:"default:NULL;comment:Finished Time"` + Traffic int64 `gorm:"default:0;comment:Traffic"` + Download int64 `gorm:"default:0;comment:Download Traffic"` + Upload int64 `gorm:"default:0;comment:Upload Traffic"` + ExpiredDownload int64 `gorm:"default:0;comment:Expired period download traffic (bytes)"` + ExpiredUpload int64 `gorm:"default:0;comment:Expired period upload traffic (bytes)"` + Token string `gorm:"index:idx_token;unique;type:varchar(255);default:'';comment:Token"` + UUID string `gorm:"type:varchar(255);unique;index:idx_uuid;default:'';comment:UUID"` + Status uint8 `gorm:"type:tinyint(1);default:0;comment:Subscription Status: 0: Pending 1: Active 2: Finished 3: Expired 4: Deducted 5: stopped"` + Note 
string `gorm:"type:varchar(500);default:'';comment:User note for subscription"` + CreatedAt time.Time `gorm:"<-:create;comment:Creation Time"` + UpdatedAt time.Time `gorm:"comment:Update Time"` } func (*Subscribe) TableName() string { diff --git a/internal/types/compat_types.go b/internal/types/compat_types.go index dd5b5d4..fc6b4e9 100644 --- a/internal/types/compat_types.go +++ b/internal/types/compat_types.go @@ -1,5 +1,13 @@ package types +// compat_types.go — 手动补充的类型,已同步到 .api 定义 +// 下次用 goctl 重新生成 types.go 后,以下类型会自动包含在 types.go 中,届时可以删除本文件中对应的定义: +// - ContactRequest (已加入 common.api) +// - GetDownloadLinkRequest / GetDownloadLinkResponse (已加入 common.api) +// - EmailLoginRequest (已更新 auth.api, 加入 form tag) +// - ReportLogMessageRequest / ReportLogMessageResponse (已加入 common.api, 路由 POST /v1/common/log/report) +// - LegacyCheckVerificationCodeRequest / LegacyCheckVerificationCodeResponse (已加入 common.api, 路由 POST /v1/common/check_code) + type ContactRequest struct { Name string `json:"name" validate:"required,max=100"` Email string `json:"email" validate:"required,email"` diff --git a/internal/types/types.go b/internal/types/types.go index aa88d6e..149eef5 100644 --- a/internal/types/types.go +++ b/internal/types/types.go @@ -361,14 +361,28 @@ type CreateDocumentRequest struct { Show *bool `json:"show"` } +type CreateNodeGroupRequest struct { + Name string `json:"name" validate:"required"` + Description string `json:"description"` + Sort int `json:"sort"` + ForCalculation *bool `json:"for_calculation"` + IsExpiredGroup *bool `json:"is_expired_group"` + ExpiredDaysLimit *int `json:"expired_days_limit"` + MaxTrafficGBExpired *int64 `json:"max_traffic_gb_expired,omitempty"` + SpeedLimit *int `json:"speed_limit"` + MinTrafficGB *int64 `json:"min_traffic_gb,omitempty"` + MaxTrafficGB *int64 `json:"max_traffic_gb,omitempty"` +} + type CreateNodeRequest struct { - Name string `json:"name"` - Tags []string `json:"tags,omitempty"` - Port uint16 `json:"port"` - Address 
string `json:"address"` - ServerId int64 `json:"server_id"` - Protocol string `json:"protocol"` - Enabled *bool `json:"enabled"` + Name string `json:"name"` + Tags []string `json:"tags,omitempty"` + Port uint16 `json:"port"` + Address string `json:"address"` + ServerId int64 `json:"server_id"` + Protocol string `json:"protocol"` + Enabled *bool `json:"enabled"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` } type CreateOrderRequest struct { @@ -462,6 +476,9 @@ type CreateSubscribeRequest struct { NewUserOnly *bool `json:"new_user_only"` Nodes []int64 `json:"nodes"` NodeTags []string `json:"node_tags"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` + NodeGroupId int64 `json:"node_group_id"` + TrafficLimit []TrafficLimit `json:"traffic_limit"` Show *bool `json:"show"` Sell *bool `json:"sell"` DeductionRatio int64 `json:"deduction_ratio"` @@ -469,6 +486,7 @@ type CreateSubscribeRequest struct { ResetCycle int64 `json:"reset_cycle"` RenewalReset *bool `json:"renewal_reset"` ShowOriginalPrice bool `json:"show_original_price"` + AutoCreateGroup bool `json:"auto_create_group"` } type CreateTicketFollowRequest struct { @@ -543,6 +561,13 @@ type DeleteAccountResponse struct { Code int64 `json:"code"` } +type DailyTrafficStats struct { + Date string `json:"date"` + Upload int64 `json:"upload"` + Download int64 `json:"download"` + Total int64 `json:"total"` +} + type DeleteAdsRequest struct { Id int64 `json:"id"` } @@ -559,6 +584,10 @@ type DeleteDocumentRequest struct { Id int64 `json:"id" validate:"required"` } +type DeleteNodeGroupRequest struct { + Id int64 `json:"id" validate:"required"` +} + type DeleteNodeRequest struct { Id int64 `json:"id"` } @@ -659,6 +688,10 @@ type EmailAuthticateConfig struct { DomainSuffixList string `json:"domain_suffix_list"` } +type ExportGroupResultRequest struct { + HistoryId *int64 `form:"history_id,omitempty"` +} + type ErrorLogMessage struct { Id int64 `json:"id"` Platform string `json:"platform"` @@ -682,7 +715,9 @@ 
type FamilyDetail struct { type FamilyMemberItem struct { UserId int64 `json:"user_id"` Identifier string `json:"identifier"` + AuthType string `json:"auth_type"` DeviceNo string `json:"device_no"` + DeviceType string `json:"device_type"` Role uint8 `json:"role"` RoleName string `json:"role_name"` Status uint8 `json:"status"` @@ -696,6 +731,7 @@ type FamilySummary struct { FamilyId int64 `json:"family_id"` OwnerUserId int64 `json:"owner_user_id"` OwnerIdentifier string `json:"owner_identifier"` + OwnerAuthType string `json:"owner_auth_type"` Status string `json:"status"` ActiveMemberCount int64 `json:"active_member_count"` MaxMembers int64 `json:"max_members"` @@ -761,9 +797,10 @@ type FilterMobileLogResponse struct { } type FilterNodeListRequest struct { - Page int `form:"page"` - Size int `form:"size"` - Search string `form:"search,omitempty"` + Page int `form:"page"` + Size int `form:"size"` + Search string `form:"search,omitempty"` + NodeGroupId *int64 `form:"node_group_id,omitempty"` } type FilterNodeListResponse struct { @@ -855,6 +892,13 @@ type Follow struct { CreatedAt int64 `json:"created_at"` } +type GenerateCaptchaResponse struct { + Id string `json:"id"` + Image string `json:"image"` + Type string `json:"type"` + BlockImage string `json:"block_image,omitempty"` +} + type GetAdsDetailRequest struct { Id int64 `form:"id"` } @@ -1085,6 +1129,37 @@ type GetInviteSalesResponse struct { List []InvitedUserSale `json:"list"` } +type GetGroupConfigRequest struct { + Keys []string `form:"keys,omitempty"` +} + +type GetGroupConfigResponse struct { + Enabled bool `json:"enabled"` + Mode string `json:"mode"` + Config map[string]interface{} `json:"config"` + State RecalculationState `json:"state"` +} + +type GetGroupHistoryDetailRequest struct { + Id int64 `form:"id" validate:"required"` +} + +type GetGroupHistoryDetailResponse struct { + GroupHistoryDetail +} + +type GetGroupHistoryRequest struct { + Page int `form:"page"` + Size int `form:"size"` + GroupMode 
string `form:"group_mode,omitempty"` + TriggerType string `form:"trigger_type,omitempty"` +} + +type GetGroupHistoryResponse struct { + Total int64 `json:"total"` + List []GroupHistory `json:"list"` +} + type GetLoginLogRequest struct { Page int `form:"page"` Size int `form:"size"` @@ -1107,6 +1182,17 @@ type GetMessageLogListResponse struct { List []MessageLog `json:"list"` } +type GetNodeGroupListRequest struct { + Page int `form:"page"` + Size int `form:"size"` + GroupId string `form:"group_id,omitempty"` +} + +type GetNodeGroupListResponse struct { + Total int64 `json:"total"` + List []NodeGroup `json:"list"` +} + type GetNodeMultiplierResponse struct { Periods []TimePeriod `json:"periods"` } @@ -1234,11 +1320,19 @@ type GetSubscribeGroupListResponse struct { Total int64 `json:"total"` } +type GetSubscribeGroupMappingRequest struct { +} + +type GetSubscribeGroupMappingResponse struct { + List []SubscribeGroupMappingItem `json:"list"` +} + type GetSubscribeListRequest struct { - Page int64 `form:"page" validate:"required"` - Size int64 `form:"size" validate:"required"` - Language string `form:"language,omitempty"` - Search string `form:"search,omitempty"` + Page int64 `form:"page" validate:"required"` + Size int64 `form:"size" validate:"required"` + Language string `form:"language,omitempty"` + Search string `form:"search,omitempty"` + NodeGroupId int64 `form:"node_group_id,omitempty"` } type GetSubscribeListResponse struct { @@ -1423,6 +1517,18 @@ type GetUserTicketListResponse struct { List []Ticket `json:"list"` } +type GetUserTrafficStatsRequest struct { + UserSubscribeId string `form:"user_subscribe_id" validate:"required"` + Days int `form:"days" validate:"required,oneof=7 30"` +} + +type GetUserTrafficStatsResponse struct { + List []DailyTrafficStats `json:"list"` + TotalUpload int64 `json:"total_upload"` + TotalDownload int64 `json:"total_download"` + TotalTraffic int64 `json:"total_traffic"` +} + type GiftLog struct { Type uint16 `json:"type"` UserId 
int64 `json:"user_id"` @@ -1439,6 +1545,25 @@ type GoogleLoginCallbackRequest struct { State string `form:"state"` } +type GroupHistory struct { + Id int64 `json:"id"` + GroupMode string `json:"group_mode"` + TriggerType string `json:"trigger_type"` + TotalUsers int `json:"total_users"` + SuccessCount int `json:"success_count"` + FailedCount int `json:"failed_count"` + StartTime *int64 `json:"start_time,omitempty"` + EndTime *int64 `json:"end_time,omitempty"` + Operator string `json:"operator,omitempty"` + ErrorLog string `json:"error_log,omitempty"` + CreatedAt int64 `json:"created_at"` +} + +type GroupHistoryDetail struct { + GroupHistory + ConfigSnapshot map[string]interface{} `json:"config_snapshot,omitempty"` +} + type HasMigrateSeverNodeResponse struct { HasMigrate bool `json:"has_migrate"` } @@ -1527,17 +1652,19 @@ type ModuleConfig struct { } type Node struct { - Id int64 `json:"id"` - Name string `json:"name"` - Tags []string `json:"tags"` - Port uint16 `json:"port"` - Address string `json:"address"` - ServerId int64 `json:"server_id"` - Protocol string `json:"protocol"` - Enabled *bool `json:"enabled"` - Sort int `json:"sort,omitempty"` - CreatedAt int64 `json:"created_at"` - UpdatedAt int64 `json:"updated_at"` + Id int64 `json:"id"` + Name string `json:"name"` + Tags []string `json:"tags"` + Port uint16 `json:"port"` + Address string `json:"address"` + ServerId int64 `json:"server_id"` + Protocol string `json:"protocol"` + Enabled *bool `json:"enabled"` + Sort int `json:"sort,omitempty"` + NodeGroupId int64 `json:"node_group_id,omitempty"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` } type NodeConfig struct { @@ -1557,6 +1684,29 @@ type NodeDNS struct { Domains []string `json:"domains"` } +type NodeGroup struct { + Id int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Sort int `json:"sort"` + ForCalculation bool 
`json:"for_calculation"` + IsExpiredGroup bool `json:"is_expired_group"` + ExpiredDaysLimit int `json:"expired_days_limit"` + MaxTrafficGBExpired int64 `json:"max_traffic_gb_expired,omitempty"` + SpeedLimit int `json:"speed_limit"` + MinTrafficGB int64 `json:"min_traffic_gb,omitempty"` + MaxTrafficGB int64 `json:"max_traffic_gb,omitempty"` + NodeCount int64 `json:"node_count,omitempty"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` +} + +type NodeGroupItem struct { + Id int64 `json:"id"` + Name string `json:"name"` + Nodes []Node `json:"nodes"` +} + type NodeOutbound struct { Name string `json:"name"` Protocol string `json:"protocol"` @@ -1774,6 +1924,15 @@ type PreviewSubscribeTemplateResponse struct { Template string `json:"template"` // 预览的模板内容 } +type PreviewUserNodesRequest struct { + UserId int64 `form:"user_id" validate:"required"` +} + +type PreviewUserNodesResponse struct { + UserId int64 `json:"user_id"` + NodeGroups []NodeGroupItem `json:"node_groups"` +} + type PrivacyPolicyConfig struct { PrivacyPolicy string `json:"privacy_policy"` } @@ -2055,6 +2214,17 @@ type QuotaTask struct { UpdatedAt int64 `json:"updated_at"` } +type RecalculateGroupRequest struct { + Mode string `json:"mode" validate:"required"` + TriggerType string `json:"trigger_type"` // "manual" or "scheduled" +} + +type RecalculationState struct { + State string `json:"state"` + Progress int `json:"progress"` + Total int `json:"total"` +} + type RechargeOrderRequest struct { Amount int64 `json:"amount" validate:"required,gt=0,lte=2000000000"` Payment int64 `json:"payment"` @@ -2139,15 +2309,22 @@ type ResetAllSubscribeTokenResponse struct { Success bool `json:"success"` } +type ResetGroupsRequest struct { + Confirm bool `json:"confirm" validate:"required"` +} + type ResetPasswordRequest struct { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Password string `json:"password" validate:"required"` - Code string 
`json:"code,optional"` - IP string `header:"X-Original-Forwarded-For"` - UserAgent string `header:"User-Agent"` - LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + Identifier string `json:"identifier"` + Email string `json:"email" validate:"required"` + Password string `json:"password" validate:"required"` + Code string `json:"code,optional"` + IP string `header:"X-Original-Forwarded-For"` + UserAgent string `header:"User-Agent"` + LoginType string `header:"Login-Type"` + CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } type ResetSortRequest struct { @@ -2379,6 +2556,17 @@ type SiteCustomDataContacts struct { Address string `json:"address"` } +type SliderVerifyCaptchaRequest struct { + Id string `json:"id" validate:"required"` + X int `json:"x" validate:"required"` + Y int `json:"y" validate:"required"` + Trail string `json:"trail"` +} + +type SliderVerifyCaptchaResponse struct { + Token string `json:"token"` +} + type SortItem struct { Id int64 `json:"id" validate:"required"` Sort int64 `json:"sort" validate:"required"` @@ -2412,6 +2600,9 @@ type Subscribe struct { NewUserOnly bool `json:"new_user_only"` Nodes []int64 `json:"nodes"` NodeTags []string `json:"node_tags"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` + NodeGroupId int64 `json:"node_group_id"` + TrafficLimit []TrafficLimit `json:"traffic_limit"` Show bool `json:"show"` Sell bool `json:"sell"` Sort int64 `json:"sort"` @@ -2472,6 +2663,11 @@ type SubscribeGroup struct { UpdatedAt int64 `json:"updated_at"` } +type SubscribeGroupMappingItem struct { + SubscribeName string `json:"subscribe_name"` + NodeGroupName string `json:"node_group_name"` +} + type SubscribeItem struct { Subscribe Sold int64 `json:"sold"` @@ -2520,6 +2716,9 @@ type TelephoneLoginRequest struct { UserAgent string `header:"User-Agent"` LoginType 
string `header:"Login-Type"` CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } type TelephoneRegisterRequest struct { @@ -2533,6 +2732,9 @@ type TelephoneRegisterRequest struct { UserAgent string `header:"User-Agent"` LoginType string `header:"Login-Type,optional"` CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } type TelephoneResetPasswordRequest struct { @@ -2545,6 +2747,9 @@ type TelephoneResetPasswordRequest struct { UserAgent string `header:"User-Agent"` LoginType string `header:"Login-Type,optional"` CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } type TestEmailSendRequest struct { @@ -2595,6 +2800,13 @@ type TosConfig struct { TosContent string `json:"tos_content"` } +type TrafficLimit struct { + StatType string `json:"stat_type"` + StatValue int64 `json:"stat_value"` + TrafficUsage int64 `json:"traffic_usage"` + SpeedLimit int64 `json:"speed_limit"` +} + type TrafficLog struct { Id int64 `json:"id"` ServerId int64 `json:"server_id"` @@ -2730,15 +2942,36 @@ type UpdateFamilyMaxMembersRequest struct { MaxMembers int64 `json:"max_members" validate:"required,gt=0"` } +type UpdateGroupConfigRequest struct { + Enabled bool `json:"enabled"` + Mode string `json:"mode"` + Config map[string]interface{} `json:"config"` +} + +type UpdateNodeGroupRequest struct { + Id int64 `json:"id" validate:"required"` + Name string `json:"name"` + Description string `json:"description"` + Sort int `json:"sort"` + ForCalculation *bool `json:"for_calculation"` + IsExpiredGroup *bool `json:"is_expired_group"` + ExpiredDaysLimit *int 
`json:"expired_days_limit"` + MaxTrafficGBExpired *int64 `json:"max_traffic_gb_expired,omitempty"` + SpeedLimit *int `json:"speed_limit"` + MinTrafficGB *int64 `json:"min_traffic_gb,omitempty"` + MaxTrafficGB *int64 `json:"max_traffic_gb,omitempty"` +} + type UpdateNodeRequest struct { - Id int64 `json:"id"` - Name string `json:"name"` - Tags []string `json:"tags,omitempty"` - Port uint16 `json:"port"` - Address string `json:"address"` - ServerId int64 `json:"server_id"` - Protocol string `json:"protocol"` - Enabled *bool `json:"enabled"` + Id int64 `json:"id"` + Name string `json:"name"` + Tags []string `json:"tags,omitempty"` + Port uint16 `json:"port"` + Address string `json:"address"` + ServerId int64 `json:"server_id"` + Protocol string `json:"protocol"` + Enabled *bool `json:"enabled"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` } type UpdateOrderStatusRequest struct { @@ -2821,6 +3054,9 @@ type UpdateSubscribeRequest struct { NewUserOnly *bool `json:"new_user_only"` Nodes []int64 `json:"nodes"` NodeTags []string `json:"node_tags"` + NodeGroupIds []int64 `json:"node_group_ids,omitempty"` + NodeGroupId int64 `json:"node_group_id"` + TrafficLimit []TrafficLimit `json:"traffic_limit"` Show *bool `json:"show"` Sell *bool `json:"sell"` Sort int64 `json:"sort"` @@ -2975,25 +3211,31 @@ type UserLoginLog struct { } type UserLoginRequest struct { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Password string `json:"password" validate:"required"` - IP string `header:"X-Original-Forwarded-For"` - UserAgent string `header:"User-Agent"` - LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + Identifier string `json:"identifier"` + Email string `json:"email" validate:"required"` + Password string `json:"password" validate:"required"` + IP string `header:"X-Original-Forwarded-For"` + UserAgent string `header:"User-Agent"` + LoginType string `header:"Login-Type"` + CfToken string 
`json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } type UserRegisterRequest struct { - Identifier string `json:"identifier"` - Email string `json:"email" validate:"required"` - Password string `json:"password" validate:"required"` - Invite string `json:"invite,optional"` - Code string `json:"code,optional"` - IP string `header:"X-Original-Forwarded-For"` - UserAgent string `header:"User-Agent"` - LoginType string `header:"Login-Type"` - CfToken string `json:"cf_token,optional"` + Identifier string `json:"identifier"` + Email string `json:"email" validate:"required"` + Password string `json:"password" validate:"required"` + Invite string `json:"invite,optional"` + Code string `json:"code,optional"` + IP string `header:"X-Original-Forwarded-For"` + UserAgent string `header:"User-Agent"` + LoginType string `header:"Login-Type"` + CfToken string `json:"cf_token,optional"` + CaptchaId string `json:"captcha_id,optional"` + CaptchaCode string `json:"captcha_code,optional"` + SliderToken string `json:"slider_token,optional"` } type UserStatistics struct { @@ -3012,10 +3254,13 @@ type UserStatisticsResponse struct { type UserSubscribe struct { Id int64 `json:"id"` + IdStr string `json:"id_str"` UserId int64 `json:"user_id"` OrderId int64 `json:"order_id"` SubscribeId int64 `json:"subscribe_id"` Subscribe Subscribe `json:"subscribe"` + NodeGroupId int64 `json:"node_group_id"` + NodeGroupName string `json:"node_group_name"` StartTime int64 `json:"start_time"` ExpireTime int64 `json:"expire_time"` FinishedAt int64 `json:"finished_at"` @@ -3035,22 +3280,30 @@ type UserSubscribe struct { } type UserSubscribeDetail struct { - Id int64 `json:"id"` - UserId int64 `json:"user_id"` - User User `json:"user"` - OrderId int64 `json:"order_id"` - SubscribeId int64 `json:"subscribe_id"` - Subscribe Subscribe `json:"subscribe"` - StartTime int64 
`json:"start_time"` - ExpireTime int64 `json:"expire_time"` - ResetTime int64 `json:"reset_time"` - Traffic int64 `json:"traffic"` - Download int64 `json:"download"` - Upload int64 `json:"upload"` - Token string `json:"token"` - Status uint8 `json:"status"` - CreatedAt int64 `json:"created_at"` - UpdatedAt int64 `json:"updated_at"` + Id int64 `json:"id"` + UserId int64 `json:"user_id"` + User User `json:"user"` + OrderId int64 `json:"order_id"` + SubscribeId int64 `json:"subscribe_id"` + Subscribe Subscribe `json:"subscribe"` + NodeGroupId int64 `json:"node_group_id"` + NodeGroupName string `json:"node_group_name"` + GroupLocked bool `json:"group_locked"` + StartTime int64 `json:"start_time"` + ExpireTime int64 `json:"expire_time"` + ResetTime int64 `json:"reset_time"` + Traffic int64 `json:"traffic"` + Download int64 `json:"download"` + Upload int64 `json:"upload"` + Token string `json:"token"` + Status uint8 `json:"status"` + EffectiveSpeed int64 `json:"effective_speed"` + IsThrottled bool `json:"is_throttled"` + ThrottleRule string `json:"throttle_rule,omitempty"` + ThrottleStart int64 `json:"throttle_start,omitempty"` + ThrottleEnd int64 `json:"throttle_end,omitempty"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` } type UserSubscribeInfo struct { @@ -3127,10 +3380,12 @@ type UserTrafficData struct { } type VeifyConfig struct { - TurnstileSiteKey string `json:"turnstile_site_key"` - EnableLoginVerify bool `json:"enable_login_verify"` - EnableRegisterVerify bool `json:"enable_register_verify"` - EnableResetPasswordVerify bool `json:"enable_reset_password_verify"` + CaptchaType string `json:"captcha_type"` + TurnstileSiteKey string `json:"turnstile_site_key"` + EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` + EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` + EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` + EnableUserResetPasswordCaptcha bool 
`json:"enable_user_reset_password_captcha"` } type VerifyCodeConfig struct { @@ -3140,11 +3395,13 @@ type VerifyCodeConfig struct { } type VerifyConfig struct { - TurnstileSiteKey string `json:"turnstile_site_key"` - TurnstileSecret string `json:"turnstile_secret"` - EnableLoginVerify bool `json:"enable_login_verify"` - EnableRegisterVerify bool `json:"enable_register_verify"` - EnableResetPasswordVerify bool `json:"enable_reset_password_verify"` + CaptchaType string `json:"captcha_type"` // local or turnstile + TurnstileSiteKey string `json:"turnstile_site_key"` + TurnstileSecret string `json:"turnstile_secret"` + EnableUserLoginCaptcha bool `json:"enable_user_login_captcha"` // User login captcha + EnableUserRegisterCaptcha bool `json:"enable_user_register_captcha"` // User register captcha + EnableAdminLoginCaptcha bool `json:"enable_admin_login_captcha"` // Admin login captcha + EnableUserResetPasswordCaptcha bool `json:"enable_user_reset_password_captcha"` // User reset password captcha } type VerifyEmailRequest struct { diff --git a/pkg/apiversion/version.go b/pkg/apiversion/version.go index 1e0cbd6..c5c6ff5 100644 --- a/pkg/apiversion/version.go +++ b/pkg/apiversion/version.go @@ -54,7 +54,7 @@ func UseLatest(header string, threshold string) bool { thresholdVersion, _ = Parse(DefaultThreshold) } - return compare(currentVersion, thresholdVersion) > 0 + return compare(currentVersion, thresholdVersion) >= 0 } func compare(left Version, right Version) int { diff --git a/pkg/captcha/local.go b/pkg/captcha/local.go new file mode 100644 index 0000000..86b9af6 --- /dev/null +++ b/pkg/captcha/local.go @@ -0,0 +1,109 @@ +package captcha + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/mojocn/base64Captcha" + "github.com/redis/go-redis/v9" +) + +type localService struct { + redis *redis.Client + driver base64Captcha.Driver +} + +func newLocalService(redisClient *redis.Client) Service { + // Configure captcha driver - alphanumeric with visual effects 
(letters + numbers) + driver := base64Captcha.NewDriverString( + 80, // height + 240, // width + 20, // noise count (more interference) + base64Captcha.OptionShowSlimeLine|base64Captcha.OptionShowSineLine, // show curved lines + 5, // length (5 characters) + "abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789", // source (exclude confusing chars) + nil, // bg color (use default) + nil, // fonts (use default) + nil, // fonts storage (use default) + ) + return &localService{ + redis: redisClient, + driver: driver, + } +} + +func (s *localService) Generate(ctx context.Context) (id string, image string, err error) { + // Generate captcha + captcha := base64Captcha.NewCaptcha(s.driver, &redisStore{ + redis: s.redis, + ctx: ctx, + }) + + id, b64s, answer, err := captcha.Generate() + if err != nil { + return "", "", err + } + + // Store answer in Redis with 5 minute expiration + key := fmt.Sprintf("captcha:%s", id) + err = s.redis.Set(ctx, key, answer, 5*time.Minute).Err() + if err != nil { + return "", "", err + } + + return id, b64s, nil +} + +func (s *localService) Verify(ctx context.Context, id string, code string, ip string) (bool, error) { + if id == "" || code == "" { + return false, nil + } + + key := fmt.Sprintf("captcha:%s", id) + + // Get answer from Redis + answer, err := s.redis.Get(ctx, key).Result() + if err != nil { + return false, err + } + + // Delete captcha after verification (one-time use) + s.redis.Del(ctx, key) + + // Verify code (case-insensitive) + return strings.EqualFold(answer, code), nil +} + +func (s *localService) GetType() CaptchaType { + return CaptchaTypeLocal +} + +// redisStore implements base64Captcha.Store interface +type redisStore struct { + redis *redis.Client + ctx context.Context +} + +func (r *redisStore) Set(id string, value string) error { + key := fmt.Sprintf("captcha:%s", id) + return r.redis.Set(r.ctx, key, value, 5*time.Minute).Err() +} + +func (r *redisStore) Get(id string, clear bool) string { + key := 
fmt.Sprintf("captcha:%s", id) + val, err := r.redis.Get(r.ctx, key).Result() + if err != nil { + return "" + } + if clear { + r.redis.Del(r.ctx, key) + } + return val +} + +func (r *redisStore) Verify(id, answer string, clear bool) bool { + v := r.Get(id, clear) + return strings.EqualFold(v, answer) +} diff --git a/pkg/captcha/service.go b/pkg/captcha/service.go new file mode 100644 index 0000000..5f8eeb0 --- /dev/null +++ b/pkg/captcha/service.go @@ -0,0 +1,70 @@ +package captcha + +import ( + "context" + + "github.com/redis/go-redis/v9" +) + +type CaptchaType string + +const ( + CaptchaTypeLocal CaptchaType = "local" + CaptchaTypeTurnstile CaptchaType = "turnstile" + CaptchaTypeSlider CaptchaType = "slider" +) + +// Service defines the captcha service interface +type Service interface { + // Generate generates a new captcha + // For local captcha: returns id and base64 image + // For turnstile: returns empty strings + // For slider: returns id, background image, and block image (in image field as JSON) + Generate(ctx context.Context) (id string, image string, err error) + + // Verify verifies the captcha + // For local captcha: token is captcha id, code is user input + // For turnstile: token is cf-turnstile-response, code is ignored + // For slider: use VerifySlider instead + Verify(ctx context.Context, token string, code string, ip string) (bool, error) + + // GetType returns the captcha type + GetType() CaptchaType +} + +// SliderService extends Service with slider-specific verification +type SliderService interface { + Service + // VerifySlider verifies slider position and trail, returns a one-time token on success + VerifySlider(ctx context.Context, id string, x, y int, trail string) (token string, err error) + // VerifySliderToken verifies the one-time token issued after slider verification + VerifySliderToken(ctx context.Context, token string) (bool, error) + // GenerateSlider returns id, background image base64, block image base64 + GenerateSlider(ctx 
context.Context) (id string, bgImage string, blockImage string, err error) +} + +// Config holds the configuration for captcha service +type Config struct { + Type CaptchaType + RedisClient *redis.Client + TurnstileSecret string +} + +// NewService creates a new captcha service based on the config +func NewService(config Config) Service { + switch config.Type { + case CaptchaTypeTurnstile: + return newTurnstileService(config.TurnstileSecret) + case CaptchaTypeSlider: + return newSliderService(config.RedisClient) + case CaptchaTypeLocal: + fallthrough + default: + return newLocalService(config.RedisClient) + } +} + +// NewSliderService creates a slider captcha service +func NewSliderService(redisClient *redis.Client) SliderService { + return newSliderService(redisClient) +} diff --git a/pkg/captcha/slider.go b/pkg/captcha/slider.go new file mode 100644 index 0000000..7c069a2 --- /dev/null +++ b/pkg/captcha/slider.go @@ -0,0 +1,515 @@ +package captcha + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "image" + "image/color" + "image/png" + "math" + "math/rand" + "time" + + "github.com/google/uuid" + "github.com/redis/go-redis/v9" +) + +const ( + sliderBgWidth = 560 + sliderBgHeight = 280 + sliderBlockSize = 100 + sliderMinX = 140 + sliderMaxX = 420 + sliderTolerance = 6 + sliderExpiry = 5 * time.Minute + sliderTokenExpiry = 30 * time.Second +) + +type sliderShape int + +const ( + shapeSquare sliderShape = 0 + shapeCircle sliderShape = 1 + shapeDiamond sliderShape = 2 + shapeStar sliderShape = 3 + shapeTriangle sliderShape = 4 + shapeTrapezoid sliderShape = 5 +) + +type sliderService struct { + redis *redis.Client +} + +func newSliderService(redisClient *redis.Client) *sliderService { + return &sliderService{redis: redisClient} +} + +// sliderData stores the correct position and shape in Redis +type sliderData struct { + X int `json:"x"` + Y int `json:"y"` + Shape sliderShape `json:"shape"` +} + +// inMask returns true if pixel (dx,dy) 
within the block bounding box belongs to the shape +func inMask(dx, dy int, shape sliderShape) bool { + half := sliderBlockSize / 2 + switch shape { + case shapeCircle: + ex := dx - half + ey := dy - half + return ex*ex+ey*ey <= half*half + case shapeDiamond: + return abs(dx-half)+abs(dy-half) <= half + case shapeStar: + return inStar(dx, dy, half) + case shapeTriangle: + return inTriangle(dx, dy) + case shapeTrapezoid: + return inTrapezoid(dx, dy) + default: // shapeSquare + margin := 8 + return dx >= margin && dx < sliderBlockSize-margin && dy >= margin && dy < sliderBlockSize-margin + } +} + +func abs(v int) int { + if v < 0 { + return -v + } + return v +} + +// pointInPolygon uses ray-casting to test if (x,y) is inside the polygon defined by pts. +func pointInPolygon(x, y float64, pts [][2]float64) bool { + n := len(pts) + inside := false + j := n - 1 + for i := 0; i < n; i++ { + xi, yi := pts[i][0], pts[i][1] + xj, yj := pts[j][0], pts[j][1] + if ((yi > y) != (yj > y)) && (x < (xj-xi)*(y-yi)/(yj-yi)+xi) { + inside = !inside + } + j = i + } + return inside +} + +// inStar returns true if (dx,dy) is inside a 5-pointed star centered in the block. +func inStar(dx, dy, half int) bool { + cx, cy := float64(half), float64(half) + r1 := float64(half) * 0.92 // outer radius + r2 := float64(half) * 0.40 // inner radius + x := float64(dx) - cx + y := float64(dy) - cy + pts := make([][2]float64, 10) + for i := 0; i < 10; i++ { + angle := float64(i)*math.Pi/5 - math.Pi/2 + r := r1 + if i%2 == 1 { + r = r2 + } + pts[i] = [2]float64{r * math.Cos(angle), r * math.Sin(angle)} + } + return pointInPolygon(x, y, pts) +} + +// inTriangle returns true if (dx,dy) is inside an upward-pointing triangle. 
+func inTriangle(dx, dy int) bool { + margin := 5 + size := sliderBlockSize - 2*margin + half := float64(sliderBlockSize) / 2 + ax, ay := half, float64(margin) + bx, by := float64(margin), float64(margin+size) + cx, cy2 := float64(margin+size), float64(margin+size) + px, py := float64(dx), float64(dy) + d1 := (px-bx)*(ay-by) - (ax-bx)*(py-by) + d2 := (px-cx)*(by-cy2) - (bx-cx)*(py-cy2) + d3 := (px-ax)*(cy2-ay) - (cx-ax)*(py-ay) + hasNeg := (d1 < 0) || (d2 < 0) || (d3 < 0) + hasPos := (d1 > 0) || (d2 > 0) || (d3 > 0) + return !(hasNeg && hasPos) +} + +// inTrapezoid returns true if (dx,dy) is inside a trapezoid (wider at bottom). +func inTrapezoid(dx, dy int) bool { + margin := 5 + topY := float64(margin) + bottomY := float64(sliderBlockSize - margin) + totalH := bottomY - topY + half := float64(sliderBlockSize) / 2 + topHalfW := float64(sliderBlockSize) * 0.25 + bottomHalfW := float64(sliderBlockSize) * 0.45 + x, y := float64(dx), float64(dy) + if y < topY || y > bottomY { + return false + } + t := (y - topY) / totalH + hw := topHalfW + t*(bottomHalfW-topHalfW) + return x >= half-hw && x <= half+hw +} + +func (s *sliderService) GenerateSlider(ctx context.Context) (id string, bgImage string, blockImage string, err error) { + bg := generateBackground() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + x := sliderMinX + r.Intn(sliderMaxX-sliderMinX) + y := r.Intn(sliderBgHeight - sliderBlockSize) + shape := sliderShape(r.Intn(6)) + + block := cropBlockShaped(bg, x, y, shape) + cutBackgroundShaped(bg, x, y, shape) + + bgB64, err := imageToPNGBase64(bg) + if err != nil { + return "", "", "", err + } + blockB64, err := imageToPNGBase64(block) + if err != nil { + return "", "", "", err + } + + id = uuid.New().String() + data, _ := json.Marshal(sliderData{X: x, Y: y, Shape: shape}) + key := fmt.Sprintf("captcha:slider:%s", id) + if err = s.redis.Set(ctx, key, string(data), sliderExpiry).Err(); err != nil { + return "", "", "", err + } + + return id, bgB64, 
blockB64, nil +} + +func (s *sliderService) Generate(ctx context.Context) (id string, image string, err error) { + id, _, _, err = s.GenerateSlider(ctx) + return id, "", err +} + +// TrailPoint records a pointer position and timestamp during drag +type TrailPoint struct { + X int `json:"x"` + Y int `json:"y"` + T int64 `json:"t"` // milliseconds since drag start +} + +// validateTrail performs human-behaviour checks on the drag trail. +// +// Rules: +// 1. Trail must be provided and have >= 8 points +// 2. Total drag duration: 300ms – 15000ms +// 3. First point x <= 10 (started from left) +// 4. No single-step jump > 80px +// 5. Final x within tolerance*2 of declared x +// 6. Speed variance > 0 (not perfectly uniform / robotic) +// 7. Y-axis total deviation >= 2px (path is not a perfect horizontal line) +func validateTrail(trail []TrailPoint, declaredX int) bool { + if len(trail) < 8 { + return false + } + + duration := trail[len(trail)-1].T - trail[0].T + if duration < 300 || duration > 15000 { + return false + } + + if trail[0].X > 10 { + return false + } + + // Collect per-step speeds and check max jump + var speeds []float64 + for i := 1; i < len(trail); i++ { + dt := float64(trail[i].T - trail[i-1].T) + dx := float64(trail[i].X - trail[i-1].X) + dy := float64(trail[i].Y - trail[i-1].Y) + if abs(int(dx)) > 80 { + return false + } + if dt > 0 { + dist := math.Sqrt(dx*dx + dy*dy) + speeds = append(speeds, dist/dt) + } + } + + // Speed variance check – robot drag tends to be perfectly uniform + if len(speeds) >= 3 { + mean := 0.0 + for _, v := range speeds { + mean += v + } + mean /= float64(len(speeds)) + variance := 0.0 + for _, v := range speeds { + d := v - mean + variance += d * d + } + variance /= float64(len(speeds)) + // If variance is essentially 0, it's robotic + if variance < 1e-6 { + return false + } + } + + // Y-axis deviation: humans almost always move slightly on Y + minY := trail[0].Y + maxY := trail[0].Y + for _, p := range trail { + if p.Y < minY 
{ + minY = p.Y + } + if p.Y > maxY { + maxY = p.Y + } + } + if maxY-minY < 2 { + return false + } + + // Final position check + lastX := trail[len(trail)-1].X + if diff := abs(lastX - declaredX); diff > sliderTolerance*2 { + return false + } + + return true +} + +func (s *sliderService) VerifySlider(ctx context.Context, id string, x, y int, trail string) (token string, err error) { + // Trail is mandatory + if trail == "" { + return "", fmt.Errorf("trail required") + } + var points []TrailPoint + if jsonErr := json.Unmarshal([]byte(trail), &points); jsonErr != nil { + return "", fmt.Errorf("invalid trail") + } + if !validateTrail(points, x) { + return "", fmt.Errorf("trail validation failed") + } + + key := fmt.Sprintf("captcha:slider:%s", id) + val, err := s.redis.Get(ctx, key).Result() + if err != nil { + return "", fmt.Errorf("captcha not found or expired") + } + + var data sliderData + if err = json.Unmarshal([]byte(val), &data); err != nil { + return "", fmt.Errorf("invalid captcha data") + } + + diffX := abs(x - data.X) + diffY := abs(y - data.Y) + if diffX > sliderTolerance || diffY > sliderTolerance { + s.redis.Del(ctx, key) + return "", fmt.Errorf("position mismatch") + } + + s.redis.Del(ctx, key) + + sliderToken := uuid.New().String() + tokenKey := fmt.Sprintf("captcha:slider:token:%s", sliderToken) + if err = s.redis.Set(ctx, tokenKey, "1", sliderTokenExpiry).Err(); err != nil { + return "", err + } + + return sliderToken, nil +} + +func (s *sliderService) VerifySliderToken(ctx context.Context, token string) (bool, error) { + if token == "" { + return false, nil + } + tokenKey := fmt.Sprintf("captcha:slider:token:%s", token) + val, err := s.redis.Get(ctx, tokenKey).Result() + if err != nil { + return false, nil + } + if val != "1" { + return false, nil + } + s.redis.Del(ctx, tokenKey) + return true, nil +} + +func (s *sliderService) Verify(ctx context.Context, token string, code string, ip string) (bool, error) { + return s.VerifySliderToken(ctx, token) 
+} + +func (s *sliderService) GetType() CaptchaType { + return CaptchaTypeSlider +} + +// cropBlockShaped copies pixels within the shape mask from bg into a new block image. +// Pixels outside the mask are transparent. A 2-pixel white border is drawn along the shape edge. +func cropBlockShaped(bg *image.NRGBA, x, y int, shape sliderShape) *image.NRGBA { + block := image.NewNRGBA(image.Rect(0, 0, sliderBlockSize, sliderBlockSize)) + for dy := 0; dy < sliderBlockSize; dy++ { + for dx := 0; dx < sliderBlockSize; dx++ { + if inMask(dx, dy, shape) { + block.SetNRGBA(dx, dy, bg.NRGBAAt(x+dx, y+dy)) + } + } + } + + // Draw 2-pixel bright border along shape edge + borderColor := color.NRGBA{R: 255, G: 255, B: 255, A: 230} + for dy := 0; dy < sliderBlockSize; dy++ { + for dx := 0; dx < sliderBlockSize; dx++ { + if !inMask(dx, dy, shape) { + continue + } + nearEdge := false + check: + for ddy := -2; ddy <= 2; ddy++ { + for ddx := -2; ddx <= 2; ddx++ { + if abs(ddx)+abs(ddy) > 2 { + continue + } + nx, ny := dx+ddx, dy+ddy + if nx < 0 || nx >= sliderBlockSize || ny < 0 || ny >= sliderBlockSize || !inMask(nx, ny, shape) { + nearEdge = true + break check + } + } + } + if nearEdge { + block.SetNRGBA(dx, dy, borderColor) + } + } + } + return block +} + +// cutBackgroundShaped blanks the shape area and draws a border outline +func cutBackgroundShaped(bg *image.NRGBA, x, y int, shape sliderShape) { + holeColor := color.NRGBA{R: 0, G: 0, B: 0, A: 100} + borderColor := color.NRGBA{R: 255, G: 255, B: 255, A: 220} + + // Fill hole + for dy := 0; dy < sliderBlockSize; dy++ { + for dx := 0; dx < sliderBlockSize; dx++ { + if inMask(dx, dy, shape) { + bg.SetNRGBA(x+dx, y+dy, holeColor) + } + } + } + + // Draw 2-pixel border along hole edge + for dy := 0; dy < sliderBlockSize; dy++ { + for dx := 0; dx < sliderBlockSize; dx++ { + if !inMask(dx, dy, shape) { + continue + } + nearEdge := false + check: + for ddy := -2; ddy <= 2; ddy++ { + for ddx := -2; ddx <= 2; ddx++ { + if abs(ddx)+abs(ddy) 
> 2 { + continue + } + nx, ny := dx+ddx, dy+ddy + if nx < 0 || nx >= sliderBlockSize || ny < 0 || ny >= sliderBlockSize || !inMask(nx, ny, shape) { + nearEdge = true + break check + } + } + } + if nearEdge { + bg.SetNRGBA(x+dx, y+dy, borderColor) + } + } + } +} + +// generateBackground creates a colorful 320x160 background image +func generateBackground() *image.NRGBA { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + img := image.NewNRGBA(image.Rect(0, 0, sliderBgWidth, sliderBgHeight)) + + blockW := 60 + blockH := 60 + palette := []color.NRGBA{ + {R: 70, G: 130, B: 180, A: 255}, + {R: 60, G: 179, B: 113, A: 255}, + {R: 205, G: 92, B: 92, A: 255}, + {R: 255, G: 165, B: 0, A: 255}, + {R: 147, G: 112, B: 219, A: 255}, + {R: 64, G: 224, B: 208, A: 255}, + {R: 220, G: 120, B: 60, A: 255}, + {R: 100, G: 149, B: 237, A: 255}, + } + + for by := 0; by*blockH < sliderBgHeight; by++ { + for bx := 0; bx*blockW < sliderBgWidth; bx++ { + base := palette[r.Intn(len(palette))] + x0 := bx * blockW + y0 := by * blockH + x1 := x0 + blockW + y1 := y0 + blockH + for py := y0; py < y1 && py < sliderBgHeight; py++ { + for px := x0; px < x1 && px < sliderBgWidth; px++ { + v := int8(r.Intn(41) - 20) + img.SetNRGBA(px, py, color.NRGBA{ + R: addVariation(base.R, v), + G: addVariation(base.G, v), + B: addVariation(base.B, v), + A: 255, + }) + } + } + } + } + + // Add some random circles for visual complexity + numCircles := 6 + r.Intn(6) + for i := 0; i < numCircles; i++ { + cx := r.Intn(sliderBgWidth) + cy := r.Intn(sliderBgHeight) + radius := 18 + r.Intn(30) + circleColor := color.NRGBA{ + R: uint8(r.Intn(256)), + G: uint8(r.Intn(256)), + B: uint8(r.Intn(256)), + A: 180, + } + drawCircle(img, cx, cy, radius, circleColor) + } + + return img +} + +func addVariation(base uint8, v int8) uint8 { + result := int(base) + int(v) + if result < 0 { + return 0 + } + if result > 255 { + return 255 + } + return uint8(result) +} + +func drawCircle(img *image.NRGBA, cx, cy, radius int, c 
color.NRGBA) { + bounds := img.Bounds() + for y := cy - radius; y <= cy+radius; y++ { + for x := cx - radius; x <= cx+radius; x++ { + if (x-cx)*(x-cx)+(y-cy)*(y-cy) <= radius*radius { + if x >= bounds.Min.X && x < bounds.Max.X && y >= bounds.Min.Y && y < bounds.Max.Y { + img.SetNRGBA(x, y, c) + } + } + } + } +} + +func imageToPNGBase64(img image.Image) (string, error) { + var buf bytes.Buffer + if err := png.Encode(&buf, img); err != nil { + return "", err + } + return "data:image/png;base64," + base64.StdEncoding.EncodeToString(buf.Bytes()), nil +} diff --git a/pkg/captcha/turnstile.go b/pkg/captcha/turnstile.go new file mode 100644 index 0000000..52e5bca --- /dev/null +++ b/pkg/captcha/turnstile.go @@ -0,0 +1,37 @@ +package captcha + +import ( + "context" + + "github.com/perfect-panel/server/pkg/turnstile" +) + +type turnstileService struct { + service turnstile.Service +} + +func newTurnstileService(secret string) Service { + return &turnstileService{ + service: turnstile.New(turnstile.Config{ + Secret: secret, + }), + } +} + +func (s *turnstileService) Generate(ctx context.Context) (id string, image string, err error) { + // Turnstile doesn't need server-side generation + return "", "", nil +} + +func (s *turnstileService) Verify(ctx context.Context, token string, code string, ip string) (bool, error) { + if token == "" { + return false, nil + } + + // Verify with Cloudflare Turnstile + return s.service.Verify(ctx, token, ip) +} + +func (s *turnstileService) GetType() CaptchaType { + return CaptchaTypeTurnstile +} diff --git a/pkg/captcha/verify.go b/pkg/captcha/verify.go new file mode 100644 index 0000000..7de103a --- /dev/null +++ b/pkg/captcha/verify.go @@ -0,0 +1,78 @@ +package captcha + +import ( + "context" + + "github.com/pkg/errors" + "github.com/redis/go-redis/v9" + + "github.com/perfect-panel/server/pkg/xerr" +) + +// VerifyInput holds the captcha fields from a login/register/reset request. 
+type VerifyInput struct { + CaptchaId string + CaptchaCode string + CfToken string + SliderToken string + IP string +} + +// VerifyCaptcha validates the captcha according to captchaType. +// Returns nil when captchaType is empty / unrecognised (i.e. captcha disabled). +func VerifyCaptcha( + ctx context.Context, + redisClient *redis.Client, + captchaType string, + turnstileSecret string, + input VerifyInput, +) error { + switch captchaType { + case string(CaptchaTypeLocal): + if input.CaptchaId == "" || input.CaptchaCode == "" { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "captcha required") + } + svc := NewService(Config{ + Type: CaptchaTypeLocal, + RedisClient: redisClient, + }) + valid, err := svc.Verify(ctx, input.CaptchaId, input.CaptchaCode, input.IP) + if err != nil { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "verify captcha error") + } + if !valid { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "invalid captcha") + } + + case string(CaptchaTypeTurnstile): + if input.CfToken == "" { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "captcha required") + } + svc := NewService(Config{ + Type: CaptchaTypeTurnstile, + TurnstileSecret: turnstileSecret, + }) + valid, err := svc.Verify(ctx, input.CfToken, "", input.IP) + if err != nil { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "verify captcha error") + } + if !valid { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "invalid captcha") + } + + case string(CaptchaTypeSlider): + if input.SliderToken == "" { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "slider captcha required") + } + sliderSvc := NewSliderService(redisClient) + valid, err := sliderSvc.VerifySliderToken(ctx, input.SliderToken) + if err != nil { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "verify captcha error") + } + if !valid { + return errors.Wrapf(xerr.NewErrCode(xerr.VerifyCodeError), "invalid slider captcha") + } + } + + 
return nil +} diff --git a/pkg/constant/context.go b/pkg/constant/context.go index ea20e2a..9a8f2d7 100644 --- a/pkg/constant/context.go +++ b/pkg/constant/context.go @@ -15,4 +15,5 @@ const ( CtxKeyIncludeExpired CtxKey = "includeExpired" CtxKeyAPIVersionUseLatest CtxKey = "apiVersionUseLatest" CtxKeyAPIHeaderRaw CtxKey = "apiHeaderRaw" + CtxKeyHasAppId CtxKey = "hasAppId" ) diff --git a/pkg/speedlimit/calculator.go b/pkg/speedlimit/calculator.go new file mode 100644 index 0000000..287d6c1 --- /dev/null +++ b/pkg/speedlimit/calculator.go @@ -0,0 +1,109 @@ +package speedlimit + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "gorm.io/gorm" +) + +// TrafficLimitRule represents a dynamic speed throttling rule. +type TrafficLimitRule struct { + StatType string `json:"stat_type"` + StatValue int64 `json:"stat_value"` + TrafficUsage int64 `json:"traffic_usage"` + SpeedLimit int64 `json:"speed_limit"` +} + +// ThrottleResult contains the computed speed limit status for a user subscription. +type ThrottleResult struct { + BaseSpeed int64 `json:"base_speed"` // Plan base speed limit (Mbps, 0=unlimited) + EffectiveSpeed int64 `json:"effective_speed"` // Current effective speed limit (Mbps) + IsThrottled bool `json:"is_throttled"` // Whether the user is currently throttled + ThrottleRule string `json:"throttle_rule"` // Description of the matched rule (empty if not throttled) + UsedTrafficGB float64 `json:"used_traffic_gb"` // Traffic used in the matched rule's window (GB) + ThrottleStart int64 `json:"throttle_start"` // Window start Unix timestamp (seconds), 0 if not throttled + ThrottleEnd int64 `json:"throttle_end"` // Window end Unix timestamp (seconds), 0 if not throttled +} + +// Calculate computes the effective speed limit for a user subscription, +// considering traffic-based throttling rules. 
+func Calculate(ctx context.Context, db *gorm.DB, userId, subscribeId, baseSpeedLimit int64, trafficLimitJSON string) *ThrottleResult { + result := &ThrottleResult{ + BaseSpeed: baseSpeedLimit, + EffectiveSpeed: baseSpeedLimit, + } + + if trafficLimitJSON == "" { + return result + } + + var rules []TrafficLimitRule + if err := json.Unmarshal([]byte(trafficLimitJSON), &rules); err != nil { + return result + } + + if len(rules) == 0 { + return result + } + + now := time.Now() + for _, rule := range rules { + var startTime time.Time + + switch rule.StatType { + case "hour": + if rule.StatValue <= 0 { + continue + } + startTime = now.Add(-time.Duration(rule.StatValue) * time.Hour) + case "day": + if rule.StatValue <= 0 { + continue + } + startTime = now.AddDate(0, 0, -int(rule.StatValue)) + default: + continue + } + + var usedTraffic struct { + Upload int64 + Download int64 + } + err := db.WithContext(ctx). + Table("traffic_log"). + Select("COALESCE(SUM(upload), 0) as upload, COALESCE(SUM(download), 0) as download"). + Where("user_id = ? AND subscribe_id = ? AND timestamp >= ? AND timestamp < ?", + userId, subscribeId, startTime, now). 
+ Scan(&usedTraffic).Error + + if err != nil { + continue + } + + usedGB := float64(usedTraffic.Upload+usedTraffic.Download) / (1024 * 1024 * 1024) + + if usedGB >= float64(rule.TrafficUsage) { + if rule.SpeedLimit > 0 { + if result.EffectiveSpeed == 0 || rule.SpeedLimit < result.EffectiveSpeed { + result.EffectiveSpeed = rule.SpeedLimit + result.IsThrottled = true + result.UsedTrafficGB = usedGB + result.ThrottleStart = startTime.Unix() + result.ThrottleEnd = now.Unix() + + statLabel := "小时" + if rule.StatType == "day" { + statLabel = "天" + } + result.ThrottleRule = fmt.Sprintf("%d%s内超%dGB,限速%dMbps", + rule.StatValue, statLabel, rule.TrafficUsage, rule.SpeedLimit) + } + } + } + } + + return result +} diff --git a/pkg/turnstile/service.go b/pkg/turnstile/service.go index e3be9a7..9af71e5 100644 --- a/pkg/turnstile/service.go +++ b/pkg/turnstile/service.go @@ -56,7 +56,9 @@ func (s *service) verify(ctx context.Context, secret string, token string, ip st _ = writer.WriteField("idempotency_key", key) } _ = writer.Close() - client := &http.Client{} + client := &http.Client{ + Timeout: 5 * time.Second, + } req, _ := http.NewRequest("POST", s.url, body) req.Header.Set("Content-Type", writer.FormDataContentType()) firstResult, err := client.Do(req) diff --git a/pkg/xerr/errCode.go b/pkg/xerr/errCode.go index 28c104a..e334a71 100644 --- a/pkg/xerr/errCode.go +++ b/pkg/xerr/errCode.go @@ -134,6 +134,11 @@ const ( DeviceBindLimitExceeded uint32 = 90019 ) +// Permission error +const ( + PermissionDenied uint32 = 40300 +) + const ( OrderNotExist uint32 = 61001 PaymentMethodNotFound uint32 = 61002 diff --git a/pkg/xerr/errMsg.go b/pkg/xerr/errMsg.go index f235187..3889559 100644 --- a/pkg/xerr/errMsg.go +++ b/pkg/xerr/errMsg.go @@ -102,6 +102,9 @@ func init() { PaymentMethodNotFound: "Payment method not found", OrderStatusError: "Order status error", InsufficientOfPeriod: "Insufficient number of period", + + // Permission error + PermissionDenied: "Permission denied", } } diff 
--git a/ppanel.api b/ppanel.api index 324f565..0786080 100644 --- a/ppanel.api +++ b/ppanel.api @@ -30,6 +30,7 @@ import ( "apis/admin/ads.api" "apis/admin/marketing.api" "apis/admin/application.api" + "apis/admin/group.api" "apis/public/user.api" "apis/public/subscribe.api" "apis/public/redemption.api" diff --git a/queue/logic/group/recalculateGroupLogic.go b/queue/logic/group/recalculateGroupLogic.go new file mode 100644 index 0000000..91b3685 --- /dev/null +++ b/queue/logic/group/recalculateGroupLogic.go @@ -0,0 +1,87 @@ +package group + +import ( + "context" + "time" + + "github.com/perfect-panel/server/internal/logic/admin/group" + "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + "github.com/perfect-panel/server/pkg/logger" + + "github.com/hibiken/asynq" +) + +type RecalculateGroupLogic struct { + svc *svc.ServiceContext +} + +func NewRecalculateGroupLogic(svc *svc.ServiceContext) *RecalculateGroupLogic { + return &RecalculateGroupLogic{ + svc: svc, + } +} + +func (l *RecalculateGroupLogic) ProcessTask(ctx context.Context, t *asynq.Task) error { + logger.Infof("[RecalculateGroup] Starting scheduled group recalculation: %s", time.Now().Format("2006-01-02 15:04:05")) + + // 1. Check if group management is enabled + var enabledConfig struct { + Value string `gorm:"column:value"` + } + err := l.svc.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). + Select("value"). + First(&enabledConfig).Error + if err != nil { + logger.Errorw("[RecalculateGroup] Failed to read group enabled config", logger.Field("error", err.Error())) + return err + } + + // If not enabled, skip execution + if enabledConfig.Value != "true" && enabledConfig.Value != "1" { + logger.Debugf("[RecalculateGroup] Group management is not enabled, skipping") + return nil + } + + // 2. Get grouping mode + var modeConfig struct { + Value string `gorm:"column:value"` + } + err = l.svc.DB.Table("system"). 
+ Where("`category` = ? AND `key` = ?", "group", "mode"). + Select("value"). + First(&modeConfig).Error + if err != nil { + logger.Errorw("[RecalculateGroup] Failed to read group mode config", logger.Field("error", err.Error())) + return err + } + + mode := modeConfig.Value + if mode == "" { + mode = "average" // default mode + } + + // 3. Only execute if mode is "traffic" + if mode != "traffic" { + logger.Debugf("[RecalculateGroup] Group mode is not 'traffic' (current: %s), skipping", mode) + return nil + } + + // 4. Execute traffic-based grouping + logger.Infof("[RecalculateGroup] Executing traffic-based grouping") + + logic := group.NewRecalculateGroupLogic(ctx, l.svc) + req := &types.RecalculateGroupRequest{ + Mode: "traffic", + TriggerType: "scheduled", + } + + if err := logic.RecalculateGroup(req); err != nil { + logger.Errorw("[RecalculateGroup] Failed to execute traffic grouping", logger.Field("error", err.Error())) + return err + } + + logger.Infof("[RecalculateGroup] Successfully completed traffic-based grouping: %s", time.Now().Format("2006-01-02 15:04:05")) + return nil +} diff --git a/queue/logic/order/activateOrderLogic.go b/queue/logic/order/activateOrderLogic.go index 3c95285..582f595 100644 --- a/queue/logic/order/activateOrderLogic.go +++ b/queue/logic/order/activateOrderLogic.go @@ -7,26 +7,25 @@ import ( "encoding/json" "errors" "fmt" - "strconv" "time" + "github.com/perfect-panel/server/internal/logic/admin/group" "github.com/perfect-panel/server/internal/model/log" "github.com/perfect-panel/server/pkg/constant" "github.com/perfect-panel/server/pkg/logger" - tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api/v5" "github.com/google/uuid" "github.com/hibiken/asynq" - "github.com/perfect-panel/server/internal/logic/telegram" "github.com/perfect-panel/server/internal/model/order" - internaltypes "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/internal/model/redemption" 
"github.com/perfect-panel/server/internal/model/subscribe" "github.com/perfect-panel/server/internal/model/user" "github.com/perfect-panel/server/internal/svc" + "github.com/perfect-panel/server/internal/types" + internaltypes "github.com/perfect-panel/server/internal/types" "github.com/perfect-panel/server/pkg/tool" "github.com/perfect-panel/server/pkg/uuidx" - "github.com/perfect-panel/server/queue/types" + queueTypes "github.com/perfect-panel/server/queue/types" "gorm.io/gorm" ) @@ -126,8 +125,8 @@ func (l *ActivateOrderLogic) ProcessTask(ctx context.Context, task *asynq.Task) } // parsePayload unMarshals the task payload into a structured format -func (l *ActivateOrderLogic) parsePayload(ctx context.Context, payload []byte) (*types.ForthwithActivateOrderPayload, error) { - var p types.ForthwithActivateOrderPayload +func (l *ActivateOrderLogic) parsePayload(ctx context.Context, payload []byte) (*queueTypes.ForthwithActivateOrderPayload, error) { + var p queueTypes.ForthwithActivateOrderPayload if err := json.Unmarshal(payload, &p); err != nil { logger.WithContext(ctx).Error("[ActivateOrderLogic] Unmarshal payload failed", logger.Field("error", err.Error()), @@ -224,27 +223,8 @@ func (l *ActivateOrderLogic) NewPurchase(ctx context.Context, orderInfo *order.O return err } - // check new user only restriction at activation to prevent concurrent bypass - if orderInfo.Type == OrderTypeSubscribe && sub.Discount != "" { - var dis []internaltypes.SubscribeDiscount - if jsonErr := json.Unmarshal([]byte(sub.Discount), &dis); jsonErr == nil { - newUserOnly := isNewUserOnlyForQuantity(dis, orderInfo.Quantity) - if newUserOnly { - if time.Since(userInfo.CreatedAt) > 24*time.Hour { - return fmt.Errorf("new user only: user %d is not a new user", userInfo.Id) - } - var historyCount int64 - if e := l.svc.DB.Model(&order.Order{}). - Where("user_id = ? AND subscribe_id = ? AND type = 1 AND status = ? 
AND order_no != ?", - orderInfo.UserId, orderInfo.SubscribeId, OrderStatusFinished, orderInfo.OrderNo). - Count(&historyCount).Error; e != nil { - return fmt.Errorf("new user only: check history error: %w", e) - } - if historyCount >= 1 { - return fmt.Errorf("new user only: user %d already activated subscribe %d", userInfo.Id, orderInfo.SubscribeId) - } - } - } + if err = validateNewUserOnlyEligibilityAtActivation(ctx, l.svc.DB, orderInfo, sub); err != nil { + return err } var userSub *user.Subscribe @@ -322,15 +302,15 @@ func (l *ActivateOrderLogic) NewPurchase(ctx context.Context, orderInfo *order.O } } + // Trigger user group recalculation (runs in background) + l.triggerUserGroupRecalculation(ctx, userInfo.Id) + // Handle commission in separate goroutine to avoid blocking go l.handleCommission(context.Background(), userInfo, orderInfo) // Clear cache l.clearServerCache(ctx, sub) - // Send notifications - l.sendNotifications(ctx, orderInfo, userInfo, sub, userSub, telegram.PurchaseNotify) - logger.WithContext(ctx).Info("Insert user subscribe success") return nil } @@ -576,7 +556,10 @@ func (l *ActivateOrderLogic) extendGiftSubscription(ctx context.Context, giftSub // This runs asynchronously to avoid blocking the main order processing flow. 
func (l *ActivateOrderLogic) handleCommission(ctx context.Context, userInfo *user.User, orderInfo *order.Order) { if !l.shouldProcessCommission(userInfo, orderInfo.IsNew) { - l.grantGiftDaysToBothParties(ctx, userInfo, orderInfo.OrderNo) + // 普通用户路径(佣金比例=0):只有首单才双方赠N天 + if orderInfo.IsNew { + l.grantGiftDaysToBothParties(ctx, userInfo, orderInfo.OrderNo) + } return } @@ -782,6 +765,63 @@ func (l *ActivateOrderLogic) clearServerCache(ctx context.Context, sub *subscrib } } +// triggerUserGroupRecalculation triggers user group recalculation after subscription changes +// This runs asynchronously in background to avoid blocking the main order processing flow +func (l *ActivateOrderLogic) triggerUserGroupRecalculation(ctx context.Context, userId int64) { + go func() { + // Use a new context with timeout for group recalculation + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Check if group management is enabled + var groupEnabled string + err := l.svc.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "enabled"). + Select("value"). + Scan(&groupEnabled).Error + if err != nil || groupEnabled != "true" && groupEnabled != "1" { + logger.Debugf("[Group Trigger] Group management not enabled, skipping recalculation") + return + } + + // Get the configured grouping mode + var groupMode string + err = l.svc.DB.Table("system"). + Where("`category` = ? AND `key` = ?", "group", "mode"). + Select("value"). 
+ Scan(&groupMode).Error + if err != nil { + logger.Errorw("[Group Trigger] Failed to get group mode", logger.Field("error", err.Error())) + return + } + + // Validate group mode + if groupMode != "average" && groupMode != "subscribe" && groupMode != "traffic" { + logger.Debugf("[Group Trigger] Invalid group mode (current: %s), skipping", groupMode) + return + } + + // Trigger group recalculation with the configured mode + logic := group.NewRecalculateGroupLogic(ctx, l.svc) + req := &types.RecalculateGroupRequest{ + Mode: groupMode, + } + + if err := logic.RecalculateGroup(req); err != nil { + logger.Errorw("[Group Trigger] Failed to recalculate user group", + logger.Field("user_id", userId), + logger.Field("error", err.Error()), + ) + return + } + + logger.Infow("[Group Trigger] Successfully recalculated user group", + logger.Field("user_id", userId), + logger.Field("mode", groupMode), + ) + }() +} + // Renewal handles subscription renewal including subscription extension, // traffic reset (if configured), commission processing, and notifications func (l *ActivateOrderLogic) Renewal(ctx context.Context, orderInfo *order.Order, iapExpireAt int64) error { @@ -827,9 +867,6 @@ func (l *ActivateOrderLogic) Renewal(ctx context.Context, orderInfo *order.Order // Handle commission go l.handleCommission(context.Background(), userInfo, orderInfo) - // Send notifications - l.sendNotifications(ctx, orderInfo, userInfo, sub, userSub, telegram.RenewalNotify) - return nil } @@ -907,6 +944,9 @@ func (l *ActivateOrderLogic) updateSubscriptionForRenewal(ctx context.Context, u userSub.ExpireTime = tool.AddTime(sub.UnitTime, orderInfo.Quantity, userSub.ExpireTime) userSub.Status = 1 + // 续费时重置过期流量字段 + userSub.ExpiredDownload = 0 + userSub.ExpiredUpload = 0 if err := l.svc.UserModel.UpdateSubscribe(ctx, userSub); err != nil { logger.WithContext(ctx).Error("Update user subscribe failed", logger.Field("error", err.Error())) @@ -931,6 +971,8 @@ func (l *ActivateOrderLogic) 
ResetTraffic(ctx context.Context, orderInfo *order. // Reset traffic userSub.Download = 0 userSub.Upload = 0 + userSub.ExpiredDownload = 0 + userSub.ExpiredUpload = 0 userSub.Status = 1 if err := l.svc.UserModel.UpdateSubscribe(ctx, userSub); err != nil { @@ -974,9 +1016,6 @@ func (l *ActivateOrderLogic) ResetTraffic(ctx context.Context, orderInfo *order. logger.WithContext(ctx).Error("[Order Queue]Insert reset subscribe log failed", logger.Field("error", err.Error())) } - // Send notifications - l.sendNotifications(ctx, orderInfo, userInfo, sub, userSub, telegram.ResetTrafficNotify) - return nil } @@ -1041,140 +1080,9 @@ func (l *ActivateOrderLogic) Recharge(ctx context.Context, orderInfo *order.Orde return err } - // Send notifications - l.sendRechargeNotifications(ctx, orderInfo, userInfo) - return nil } -// sendNotifications sends both user and admin notifications for order completion -func (l *ActivateOrderLogic) sendNotifications(ctx context.Context, orderInfo *order.Order, userInfo *user.User, sub *subscribe.Subscribe, userSub *user.Subscribe, notifyType string) { - // Send user notification - if telegramId, ok := findTelegram(userInfo); ok { - templateData := l.buildUserNotificationData(orderInfo, sub, userSub) - if text, err := tool.RenderTemplateToString(notifyType, templateData); err == nil { - l.sendUserNotifyWithTelegram(telegramId, text) - } - } - - // Send admin notification - adminData := l.buildAdminNotificationData(orderInfo, sub) - if text, err := tool.RenderTemplateToString(telegram.AdminOrderNotify, adminData); err == nil { - l.sendAdminNotifyWithTelegram(ctx, text) - } -} - -// sendRechargeNotifications sends specific notifications for balance recharge orders -func (l *ActivateOrderLogic) sendRechargeNotifications(ctx context.Context, orderInfo *order.Order, userInfo *user.User) { - // Send user notification - if telegramId, ok := findTelegram(userInfo); ok { - templateData := map[string]string{ - "OrderAmount": fmt.Sprintf("%.2f", 
float64(orderInfo.Price)/100), - "PaymentMethod": orderInfo.Method, - "Time": orderInfo.CreatedAt.Format("2006-01-02 15:04:05"), - "Balance": fmt.Sprintf("%.2f", float64(userInfo.Balance)/100), - } - if text, err := tool.RenderTemplateToString(telegram.RechargeNotify, templateData); err == nil { - l.sendUserNotifyWithTelegram(telegramId, text) - } - } - - // Send admin notification - adminData := map[string]string{ - "OrderNo": orderInfo.OrderNo, - "TradeNo": orderInfo.TradeNo, - "OrderAmount": fmt.Sprintf("%.2f", float64(orderInfo.Price)/100), - "SubscribeName": "余额充值", - "OrderStatus": "已支付", - "OrderTime": orderInfo.CreatedAt.Format("2006-01-02 15:04:05"), - "PaymentMethod": orderInfo.Method, - } - if text, err := tool.RenderTemplateToString(telegram.AdminOrderNotify, adminData); err == nil { - l.sendAdminNotifyWithTelegram(ctx, text) - } -} - -// buildUserNotificationData creates template data for user notifications -func (l *ActivateOrderLogic) buildUserNotificationData(orderInfo *order.Order, sub *subscribe.Subscribe, userSub *user.Subscribe) map[string]string { - data := map[string]string{ - "OrderNo": orderInfo.OrderNo, - "SubscribeName": sub.Name, - "OrderAmount": fmt.Sprintf("%.2f", float64(orderInfo.Price)/100), - } - - if userSub != nil { - data["ExpireTime"] = userSub.ExpireTime.Format("2006-01-02 15:04:05") - data["ResetTime"] = time.Now().Format("2006-01-02 15:04:05") - } - - return data -} - -// buildAdminNotificationData creates template data for admin notifications -func (l *ActivateOrderLogic) buildAdminNotificationData(orderInfo *order.Order, sub *subscribe.Subscribe) map[string]string { - subscribeName := sub.Name - if orderInfo.Type == OrderTypeResetTraffic { - subscribeName = "流量重置" - } - - return map[string]string{ - "OrderNo": orderInfo.OrderNo, - "TradeNo": orderInfo.TradeNo, - "SubscribeName": subscribeName, - "OrderAmount": fmt.Sprintf("%.2f", float64(orderInfo.Price)/100), - "OrderStatus": "已支付", - "OrderTime": 
orderInfo.CreatedAt.Format("2006-01-02 15:04:05"), - "PaymentMethod": orderInfo.Method, - } -} - -// sendUserNotifyWithTelegram sends a notification message to a user via Telegram -func (l *ActivateOrderLogic) sendUserNotifyWithTelegram(chatId int64, text string) { - if l.svc.TelegramBot == nil { - return - } - msg := tgbotapi.NewMessage(chatId, text) - msg.ParseMode = "markdown" - if _, err := l.svc.TelegramBot.Send(msg); err != nil { - logger.Error("Send telegram user message failed", logger.Field("error", err.Error())) - } -} - -// sendAdminNotifyWithTelegram sends a notification message to all admin users via Telegram -func (l *ActivateOrderLogic) sendAdminNotifyWithTelegram(ctx context.Context, text string) { - if l.svc.TelegramBot == nil { - return - } - admins, err := l.svc.UserModel.QueryAdminUsers(ctx) - if err != nil { - logger.WithContext(ctx).Error("Query admin users failed", logger.Field("error", err.Error())) - return - } - - for _, admin := range admins { - if telegramId, ok := findTelegram(admin); ok { - msg := tgbotapi.NewMessage(telegramId, text) - msg.ParseMode = "markdown" - if _, err := l.svc.TelegramBot.Send(msg); err != nil { - logger.WithContext(ctx).Error("Send telegram admin message failed", logger.Field("error", err.Error())) - } - } - } -} - -// findTelegram extracts Telegram chat ID from user authentication methods. -// Returns the chat ID and a boolean indicating if Telegram auth was found. 
-func findTelegram(u *user.User) (int64, bool) { - for _, item := range u.AuthMethods { - if item.AuthType == "telegram" { - if telegramId, err := strconv.ParseInt(item.AuthIdentifier, 10, 64); err == nil { - return telegramId, true - } - } - } - return 0, false -} - // RedemptionActivate handles redemption code activation including subscription creation, // redemption record creation, used count update, cache clearing, and notifications func (l *ActivateOrderLogic) RedemptionActivate(ctx context.Context, orderInfo *order.Order) error { @@ -1242,6 +1150,7 @@ func (l *ActivateOrderLogic) RedemptionActivate(ctx context.Context, orderInfo * Traffic: us.Traffic, Download: us.Download, Upload: us.Upload, + NodeGroupId: us.NodeGroupId, } break } @@ -1328,6 +1237,7 @@ func (l *ActivateOrderLogic) RedemptionActivate(ctx context.Context, orderInfo * Token: uuidx.SubscribeToken(orderInfo.OrderNo), UUID: uuid.New().String(), Status: 1, + NodeGroupId: sub.NodeGroupId, // Inherit node_group_id from subscription plan } err = l.svc.UserModel.InsertSubscribe(ctx, newSubscribe, tx) @@ -1374,6 +1284,9 @@ func (l *ActivateOrderLogic) RedemptionActivate(ctx context.Context, orderInfo * return err } + // Trigger user group recalculation (runs in background) + l.triggerUserGroupRecalculation(ctx, userInfo.Id) + // 7. 清理缓存(关键步骤:让节点获取最新订阅) l.clearServerCache(ctx, sub) @@ -1406,12 +1319,22 @@ func (l *ActivateOrderLogic) RedemptionActivate(ctx context.Context, orderInfo * } // isNewUserOnlyForQuantity checks whether the matched discount tier has new_user_only enabled. +// Returns true only when all matching tiers for the given quantity require new-user status +// (i.e. no fallback tier with new_user_only=false exists). When both a new-user-only tier +// and a general tier exist for the same quantity, non-new-users can still purchase via the +// general tier, so this returns false. 
func isNewUserOnlyForQuantity(discounts []internaltypes.SubscribeDiscount, inputQuantity int64) bool { + hasNewUserOnly := false + hasFallback := false for _, d := range discounts { - if inputQuantity == d.Quantity { - return d.NewUserOnly + if d.Quantity != inputQuantity { + continue + } + if d.NewUserOnly { + hasNewUserOnly = true + } else { + hasFallback = true } } - - return false + return hasNewUserOnly && !hasFallback } diff --git a/queue/logic/order/newUserEligibility.go b/queue/logic/order/newUserEligibility.go new file mode 100644 index 0000000..066ef3c --- /dev/null +++ b/queue/logic/order/newUserEligibility.go @@ -0,0 +1,58 @@ +package orderLogic + +import ( + "context" + "encoding/json" + "fmt" + "time" + + commonLogic "github.com/perfect-panel/server/internal/logic/common" + "github.com/perfect-panel/server/internal/model/order" + "github.com/perfect-panel/server/internal/model/subscribe" + internaltypes "github.com/perfect-panel/server/internal/types" + "gorm.io/gorm" +) + +func validateNewUserOnlyEligibilityAtActivation( + ctx context.Context, + db *gorm.DB, + orderInfo *order.Order, + sub *subscribe.Subscribe, +) error { + if orderInfo == nil || sub == nil || orderInfo.Type != OrderTypeSubscribe || sub.Discount == "" { + return nil + } + + var discounts []internaltypes.SubscribeDiscount + if err := json.Unmarshal([]byte(sub.Discount), &discounts); err != nil { + return nil + } + if !isNewUserOnlyForQuantity(discounts, orderInfo.Quantity) { + return nil + } + + eligibility, err := commonLogic.ResolveNewUserEligibility(ctx, db, orderInfo.UserId) + if err != nil { + return err + } + if !eligibility.IsNewUserAt(time.Now()) { + return fmt.Errorf("new user only: user %d is not a new user", orderInfo.UserId) + } + + historyCount, err := commonLogic.CountScopedSubscribePurchaseOrders( + ctx, + db, + eligibility.ScopeUserIDs, + orderInfo.SubscribeId, + []int64{OrderStatusFinished}, + orderInfo.OrderNo, + ) + if err != nil { + return fmt.Errorf("new user 
only: check history error: %w", err) + } + if historyCount >= 1 { + return fmt.Errorf("new user only: user %d already activated subscribe %d", orderInfo.UserId, orderInfo.SubscribeId) + } + + return nil +} diff --git a/queue/logic/subscription/checkSubscriptionLogic.go b/queue/logic/subscription/checkSubscriptionLogic.go index 81b86e7..02fbf01 100644 --- a/queue/logic/subscription/checkSubscriptionLogic.go +++ b/queue/logic/subscription/checkSubscriptionLogic.go @@ -62,7 +62,6 @@ func (l *CheckSubscriptionLogic) ProcessTask(ctx context.Context, _ *asynq.Task) } l.clearServerCache(ctx, list...) logger.Infow("[Check Subscription Traffic] Update subscribe status", logger.Field("user_ids", ids), logger.Field("count", int64(len(ids)))) - } else { logger.Info("[Check Subscription Traffic] No subscribe need to update") } @@ -108,6 +107,7 @@ func (l *CheckSubscriptionLogic) ProcessTask(ctx context.Context, _ *asynq.Task) } else { logger.Info("[Check Subscription Expire] No subscribe need to update") } + return nil }) if err != nil { diff --git a/queue/logic/traffic/trafficStatisticsLogic.go b/queue/logic/traffic/trafficStatisticsLogic.go index 37614cb..9c5e9b1 100644 --- a/queue/logic/traffic/trafficStatisticsLogic.go +++ b/queue/logic/traffic/trafficStatisticsLogic.go @@ -3,11 +3,13 @@ package traffic import ( "context" "encoding/json" + "fmt" "strings" "time" "github.com/perfect-panel/server/internal/model/node" "github.com/perfect-panel/server/pkg/logger" + "github.com/perfect-panel/server/pkg/speedlimit" "github.com/hibiken/asynq" "github.com/perfect-panel/server/internal/model/traffic" @@ -98,11 +100,13 @@ func (l *TrafficStatisticsLogic) ProcessTask(ctx context.Context, task *asynq.Ta // update user subscribe with log d := int64(float32(log.Download) * ratio * realTimeMultiplier) u := int64(float32(log.Upload) * ratio * realTimeMultiplier) - if err := l.svc.UserModel.UpdateUserSubscribeWithTraffic(ctx, sub.Id, d, u); err != nil { + isExpired := 
now.After(sub.ExpireTime) + if err := l.svc.UserModel.UpdateUserSubscribeWithTraffic(ctx, sub.Id, d, u, isExpired); err != nil { logger.WithContext(ctx).Error("[TrafficStatistics] Update user subscribe with log failed", logger.Field("sid", log.SID), logger.Field("download", float32(log.Download)*ratio), logger.Field("upload", float32(log.Upload)*ratio), + logger.Field("is_expired", isExpired), logger.Field("error", err.Error()), ) continue @@ -124,6 +128,21 @@ func (l *TrafficStatisticsLogic) ProcessTask(ctx context.Context, task *asynq.Ta logger.Field("error", err.Error()), ) } + + // 写完流量后检查是否触发按量限速,若触发则清除节点缓存使限速立即生效 + if planSub, planErr := l.svc.SubscribeModel.FindOne(ctx, sub.SubscribeId); planErr == nil && + (planSub.SpeedLimit > 0 || planSub.TrafficLimit != "") { + throttle := speedlimit.Calculate(ctx, l.svc.DB, sub.UserId, sub.Id, planSub.SpeedLimit, planSub.TrafficLimit) + if throttle.IsThrottled { + cacheKey := fmt.Sprintf("%s%d", node.ServerUserListCacheKey, payload.ServerId) + if delErr := l.svc.Redis.Del(ctx, cacheKey).Err(); delErr != nil { + logger.WithContext(ctx).Error("[TrafficStatistics] Clear server user cache failed", + logger.Field("serverId", payload.ServerId), + logger.Field("error", delErr.Error()), + ) + } + } + } } return nil } diff --git a/scripts/gen_test_excel.py b/scripts/gen_test_excel.py new file mode 100644 index 0000000..0ebbf47 --- /dev/null +++ b/scripts/gen_test_excel.py @@ -0,0 +1,274 @@ +"""Generate ppanel-server test case Excel file.""" +import os +from openpyxl import Workbook +from openpyxl.styles import ( + Font, PatternFill, Alignment, Border, Side +) +from openpyxl.utils import get_column_letter + +OUTPUT_PATH = os.path.join(os.path.dirname(__file__), "..", "tests", "ppanel_test_cases.xlsx") + +# ── Color palette ────────────────────────────────────────────────────────────── +C_HEADER_BG = "1F4E79" # dark blue – header row +C_HEADER_FONT = "FFFFFF" # white +C_SHEET_TITLE = "2E75B6" # mid blue – sheet title row +C_P0_BG 
= "FFE2E2" # light red – P0 +C_P1_BG = "FFF2CC" # light yellow – P1 +C_P2_BG = "E2EFDA" # light green – P2 +C_BORDER = "BFBFBF" + +def thin_border(): + s = Side(style="thin", color=C_BORDER) + return Border(left=s, right=s, top=s, bottom=s) + +def header_fill(hex_color): + return PatternFill("solid", fgColor=hex_color) + +def row_fill(hex_color): + return PatternFill("solid", fgColor=hex_color) + +# ── Column definitions ───────────────────────────────────────────────────────── +COLUMNS = ["用例ID", "模块", "功能点", "前置条件", "测试步骤", "预期结果", + "实际结果", "测试状态", "优先级", "备注"] +COL_WIDTHS = [16, 16, 28, 32, 40, 40, 20, 12, 8, 20] + +# ── Test data ────────────────────────────────────────────────────────────────── +SHEET1_ORDER = { + "name": "订单核心流程", + "rows": [ + # id, 模块, 功能点, 前置, 步骤, 预期, 优先 + ("TC-ORDER-001","订单/预创建","正常预览订单价格","用户已登录,套餐存在且在售","传入有效 subscribe_id, quantity=1","返回 price/amount/discount 字段正确","P0"), + ("TC-ORDER-002","订单/预创建","数量为0时自动修正为1","用户已登录","quantity=0","自动设为1,正常返回价格","P1"), + ("TC-ORDER-003","订单/预创建","套餐购买数量限制(Quota)","用户已达到该套餐购买上限","再次预创建同套餐订单","返回 SubscribeQuotaLimit 错误","P0"), + ("TC-ORDER-004","订单/预创建","新用户专属折扣(24h内注册)","用户注册在24h内,套餐有 new_user_only 折扣","预创建订单","折扣生效,amount < price","P0"), + ("TC-ORDER-005","订单/预创建","老用户不享受新用户折扣","用户注册超过24h","预创建有 new_user_only 折扣的套餐","返回 SubscribeNewUserOnly 错误","P0"), + ("TC-ORDER-006","订单/预创建","新用户已购过不重复享受新用户折扣","用户24h内注册,但已购买过该套餐","预创建同套餐","折扣不生效,按原价计算","P0"), + ("TC-ORDER-007","订单/预创建","优惠券不存在","用户已登录","传入不存在的 coupon code","返回 CouponNotExist 错误","P1"), + ("TC-ORDER-008","订单/预创建","优惠券已用完(count限制)","优惠券 used_count >= count","传入该优惠券","返回 CouponAlreadyUsed 错误","P1"), + ("TC-ORDER-009","订单/预创建","优惠券个人使用次数超限","用户已使用该优惠券达 user_limit 次","再次使用","返回 CouponInsufficientUsage 错误","P1"), + ("TC-ORDER-010","订单/预创建","优惠券不适用于该套餐","优惠券绑定了特定套餐,与当前套餐不符","传入该优惠券","返回 CouponNotApplicable 错误","P1"), + ("TC-ORDER-011","订单/预创建","支付手续费计算","支付方式有手续费配置","传入 payment_id","feeAmount 正确,amount = 原金额 + 手续费","P1"), + 
("TC-ORDER-012","订单/预创建","礼品金额抵扣","用户 gift_amount > 0","预创建订单","deduction_amount 正确,amount 减去礼品金额","P1"), + ("TC-ORDER-013","订单/预创建","礼品金额全额抵扣(amount归零)","用户 gift_amount >= 订单金额","预创建订单","amount=0,deduction_amount = 原订单金额","P1"), + ("TC-ORDER-014","订单/购买","正常购买订阅","用户已登录,套餐在售有库存","发起购买请求","订单创建成功,返回 order_no","P0"), + ("TC-ORDER-015","订单/购买","套餐库存为0不允许购买","套餐 inventory=0","发起购买","返回 SubscribeOutOfStock 错误","P0"), + ("TC-ORDER-016","订单/购买","单订阅模式:已有 pending 订单自动关闭","SingleModel=true,用户已有 pending 订单","对同套餐再次购买","旧 pending 订单关闭,新订单创建成功","P0"), + ("TC-ORDER-017","订单/购买","单订阅模式:自动路由为续费","SingleModel=true,用户已有有效订阅","购买相同套餐","订单类型=2(续费),parent_id 指向原订单","P0"), + ("TC-ORDER-018","订单/购买","数量超过 MaxQuantity 限制","—","quantity > MaxQuantity","返回 InvalidParams 错误","P1"), + ("TC-ORDER-019","订单/购买","金额超过 MaxOrderAmount","套餐单价极高","购买","返回 InvalidParams 错误","P1"), + ("TC-ORDER-020","订单/购买","15分钟后自动关闭未支付订单","订单已创建,未支付","等待15分钟后触发队列","订单状态变为 Close(3)","P0"), + ("TC-ORDER-021","订单/激活","订单激活(NewPurchase)","订单状态=已支付(2),类型=1","触发激活队列","用户订阅创建,订单状态变为 Finished(5)","P0"), + ("TC-ORDER-022","订单/激活","订单激活(Renewal)","订单状态=已支付,类型=2","触发激活","订阅到期时间延长","P0"), + ("TC-ORDER-023","订单/激活","订单激活(ResetTraffic)","订单状态=已支付,类型=3","触发激活","用户流量重置","P0"), + ("TC-ORDER-024","订单/激活","订单激活(Recharge)","订单状态=已支付,类型=4","触发激活","用户余额增加","P0"), + ("TC-ORDER-025","订单/激活","订单激活(Redemption)","订单状态=已支付,类型=5","触发激活","兑换码激活成功","P0"), + ("TC-ORDER-026","订单/激活","幂等性:已完成订单不重复处理","订单状态=Finished(5)","再次触发激活","直接跳过,不重复执行","P0"), + ("TC-ORDER-027","订单/激活","非已支付状态订单不处理","订单状态=Pending(1) 或 Close(3)","触发激活","跳过,返回 ErrInvalidOrderStatus","P0"), + ] +} + +SHEET2_USER = { + "name": "用户模块", + "rows": [ + ("TC-USER-001","用户/注册","邮箱注册","邮箱未注册","提交有效邮箱+密码","用户创建成功,返回 token","P0"), + ("TC-USER-002","用户/登录","邮箱密码登录","用户已注册","提交正确邮箱+密码","返回 JWT token,session 写入 Redis","P0"), + ("TC-USER-003","用户/设备登录","AES-CBC 加密设备登录","配置 security_secret","Body 使用正确密钥加密","登录成功","P0"), + ("TC-USER-004","用户/设备登录","错误密钥设备登录","—","Body 使用错误密钥加密","返回认证失败错误","P0"), 
+ ("TC-USER-005","用户/退出登录","解绑设备(退出家庭组)","用户在家庭组中","调用 unbind_device","用户从家庭组移除,device 记录不删除、不禁用","P0"), + ("TC-USER-006","用户/注销账号","正常注销","用户已登录","调用 delete_account","账号软删除,auth_methods 软删除,Redis 缓存清理","P0"), + ("TC-USER-007","用户/注销账号","家主注销 → 解散家庭","用户是家庭组家主","注销账号","家庭所有成员 status=removed,family status=disabled","P0"), + ("TC-USER-008","用户/注销账号","成员注销 → 仅退出家庭","用户是家庭组成员","注销账号","仅该成员退出,家庭组继续存在","P0"), + ("TC-USER-009","用户/注销","缓存清理(email key 残留问题)","用户已注销,email 缓存可能残留","注销后检查 Redis","cache:user:email:{email} 已删除","P0"), + ("TC-USER-010","用户/邀请","绑定邀请码","用户未绑定过邀请码","提交有效邀请码","referer_id 写入,邀请关系建立","P1"), + ("TC-USER-011","用户/邀请","重复绑定邀请码","用户已绑定邀请码","再次绑定","返回错误,不允许重复绑定","P1"), + ("TC-USER-012","用户/佣金","首购返佣","用户通过邀请码注册,完成首次付款","订单激活","邀请人佣金增加","P1"), + ("TC-USER-013","用户/佣金","only_first_purchase=true 仅首购返佣","配置仅首购","被邀请人第二次购买","不再发佣金","P1"), + ("TC-USER-014","用户/佣金","赠送天数(双方)","邀请关系建立,被邀请人购买","订单激活","邀请人和被邀请人各获得赠送天数","P1"), + ("TC-USER-015","用户/家庭组","踢出家庭成员","用户是家庭组家主","踢出某成员","该成员退出家庭组,设备记录不变","P1"), + ("TC-USER-016","用户/订阅","查看订阅状态(含节点分组名和限速时间)","用户有有效订阅","查询订阅状态","返回节点分组名、限速起止时间","P1"), + ] +} + +SHEET3_SUB = { + "name": "订阅套餐", + "rows": [ + ("TC-SUB-001","套餐/列表","获取可用套餐列表","—","调用套餐列表接口","返回所有在售套餐","P1"), + ("TC-SUB-002","套餐/列表","老版本客户端裁剪套餐列表","请求头含 X-App-Id(老版本标识)","调用套餐列表","每个套餐的 discount 列表去掉最后一项","P1"), + ("TC-SUB-003","套餐/购买限制","Quota 限制(每用户购买上限)","套餐设置 quota=1","用户购买2次同套餐","第二次返回 SubscribeQuotaLimit","P0"), + ("TC-SUB-004","套餐/折扣","数量折扣梯度","套餐配置多级数量折扣","购买不同数量","对应折扣率正确应用","P1"), + ("TC-SUB-005","套餐/库存","库存充足时正常购买","inventory > 0","购买","成功,inventory -1","P1"), + ("TC-SUB-006","套餐/库存","库存=-1(无限库存)","inventory=-1","多次购买","不减少库存,始终可购","P1"), + ] +} + +SHEET4_PAY = { + "name": "支付与优惠券", + "rows": [ + ("TC-PAY-001","支付/方式","获取可用支付方式列表","—","调用支付方式接口","返回当前配置的支付方式","P1"), + ("TC-PAY-002","支付/手续费","固定手续费计算","支付方式配置固定手续费","下单","feeAmount = 配置值","P1"), + ("TC-PAY-003","支付/手续费","百分比手续费计算","支付方式配置百分比手续费","下单","feeAmount = amount × 百分比","P1"), + 
("TC-PAY-004","支付/手续费","amount=0 时不计算手续费","礼品金额全额抵扣后 amount=0","下单","feeAmount=0","P1"), + ("TC-CPN-001","优惠券/固定减免","固定金额优惠券","优惠券类型=固定,value=100","使用优惠券","订单减免100","P1"), + ("TC-CPN-002","优惠券/百分比","百分比优惠券","优惠券类型=百分比,value=0.8","使用优惠券","订单金额×0.8","P1"), + ("TC-CPN-003","优惠券/过期","过期优惠券不可用","优惠券 expire_at < now","使用","返回错误(CouponExpired)","P1"), + ("TC-CPN-004","优惠券/套餐绑定","仅限指定套餐使用","优惠券绑定套餐A","用于套餐B","返回 CouponNotApplicable","P1"), + ] +} + +SHEET5_IAP = { + "name": "IAP苹果内购", + "rows": [ + ("TC-IAP-001","IAP/绑定","绑定苹果内购 transaction","苹果 transaction 有效","提交 transaction_id","订单创建并激活,订阅开通","P1"), + ("TC-IAP-002","IAP/绑定","重复绑定同一 transaction","transaction 已绑定","再次提交","幂等处理,不重复创建订单","P1"), + ("TC-IAP-003","IAP/单订阅模式","内购续费路由","SingleModel=true,用户已有订阅","提交续费 transaction","路由为续费类型订单","P1"), + ("TC-IAP-004","IAP/对账","日对账任务","配置了 IAP 对账","触发日对账","检查并补处理漏掉的 transaction","P2"), + ] +} + +SHEET6_LOG = { + "name": "日志与缓存", + "rows": [ + ("TC-LOG-001","日志/佣金","佣金记录写入 system_logs","发生佣金发放","触发订单激活","type=33 的记录写入,content.type 为 331 或 332","P2"), + ("TC-LOG-002","日志/礼品金额","礼品金额扣除记录","用户有 gift_amount,下单扣除","购买","GiftTypeReduce 记录写入 system_logs","P2"), + ("TC-CACHE-001","缓存/用户","注销后 user email 缓存清理","用户已注销","检查 Redis","cache:user:email:{email} 已删除","P0"), + ("TC-CACHE-002","缓存/订阅","订阅 token 缓存有效","用户有订阅","查询订阅","从 cache:user:subscribe:token:{token} 命中","P2"), + ("TC-CACHE-003","缓存/签名","X-App-Id 签名验证","AppSecrets 已配置","发送带签名请求","验签通过,正常处理","P1"), + ("TC-CACHE-004","缓存/签名","无 X-App-Id 跳过签名","—","发送无签名请求","直接通过,不验签","P1"), + ] +} + +SHEET7_QUEUE = { + "name": "队列任务", + "rows": [ + ("TC-QUEUE-001","队列/订单关闭","超时自动关闭订单","未支付订单存在","等待15分钟","订单状态=Close","P0"), + ("TC-QUEUE-002","队列/订阅检查","定期检查订阅到期","用户订阅即将到期","触发 checkSubscription","到期通知发送","P2"), + ("TC-QUEUE-003","队列/流量统计","服务器流量统计写入","有流量数据上报","触发 trafficStat","流量数据正确写入 DB","P2"), + ("TC-QUEUE-004","队列/邮件","批量发送邮件任务","已创建批量邮件任务","触发队列","邮件发送成功,任务状态更新","P2"), + ("TC-QUEUE-005","队列/流量重置","定期重置用户流量","配置了流量重置周期","触发 
resetTraffic","用户流量归零","P2"), + ] +} + +ALL_SHEETS = [SHEET1_ORDER, SHEET2_USER, SHEET3_SUB, SHEET4_PAY, SHEET5_IAP, SHEET6_LOG, SHEET7_QUEUE] + +PRIORITY_FILL = { + "P0": row_fill(C_P0_BG), + "P1": row_fill(C_P1_BG), + "P2": row_fill(C_P2_BG), +} + + +def write_sheet(wb: Workbook, sheet_def: dict): + ws = wb.create_sheet(title=sheet_def["name"]) + rows = sheet_def["rows"] + + # ── Title row ────────────────────────────────────────────────────────────── + ws.merge_cells(start_row=1, start_column=1, end_row=1, end_column=len(COLUMNS)) + title_cell = ws.cell(row=1, column=1, value=f"ppanel-server 测试用例 — {sheet_def['name']}") + title_cell.font = Font(name="微软雅黑", bold=True, size=13, color=C_HEADER_FONT) + title_cell.fill = header_fill(C_SHEET_TITLE) + title_cell.alignment = Alignment(horizontal="center", vertical="center") + ws.row_dimensions[1].height = 28 + + # ── Header row ───────────────────────────────────────────────────────────── + for col_idx, col_name in enumerate(COLUMNS, start=1): + cell = ws.cell(row=2, column=col_idx, value=col_name) + cell.font = Font(name="微软雅黑", bold=True, size=10, color=C_HEADER_FONT) + cell.fill = header_fill(C_HEADER_BG) + cell.alignment = Alignment(horizontal="center", vertical="center", wrap_text=True) + cell.border = thin_border() + ws.row_dimensions[2].height = 22 + + # ── Data rows ────────────────────────────────────────────────────────────── + for r_idx, row in enumerate(rows, start=3): + tc_id, module, feature, precond, steps, expected, priority = row + values = [tc_id, module, feature, precond, steps, expected, "", "", priority, ""] + fill = PRIORITY_FILL.get(priority, None) + for c_idx, val in enumerate(values, start=1): + cell = ws.cell(row=r_idx, column=c_idx, value=val) + cell.font = Font(name="微软雅黑", size=9) + cell.alignment = Alignment(horizontal="left", vertical="center", wrap_text=True) + cell.border = thin_border() + if fill: + cell.fill = fill + ws.row_dimensions[r_idx].height = 45 + + # ── Column widths 
────────────────────────────────────────────────────────── + for col_idx, width in enumerate(COL_WIDTHS, start=1): + ws.column_dimensions[get_column_letter(col_idx)].width = width + + # ── Freeze panes ────────────────────────────────────────────────────────── + ws.freeze_panes = "A3" + + # ── Auto filter ─────────────────────────────────────────────────────────── + ws.auto_filter.ref = f"A2:{get_column_letter(len(COLUMNS))}2" + + +def write_legend_sheet(wb: Workbook): + ws = wb.create_sheet(title="说明", index=0) + ws.column_dimensions["A"].width = 18 + ws.column_dimensions["B"].width = 50 + + title = ws.cell(row=1, column=1, value="ppanel-server 测试用例说明") + ws.merge_cells("A1:B1") + title.font = Font(name="微软雅黑", bold=True, size=13, color=C_HEADER_FONT) + title.fill = header_fill(C_SHEET_TITLE) + title.alignment = Alignment(horizontal="center", vertical="center") + ws.row_dimensions[1].height = 28 + + legend_data = [ + ("项目", "说明"), + ("测试框架", "ppanel-server — go-zero + Gin"), + ("数据库", "本地 MySQL(真实),禁止 SQLite"), + ("Redis", "本地 Redis 或 miniredis"), + ("时间戳规范", "后端统一返回秒级 Unix(),前端 ×1000"), + ("", ""), + ("优先级", "含义"), + ("P0(红色)", "核心业务,必须通过。订单/认证/缓存清理等"), + ("P1(黄色)", "重要功能,强烈建议测试。折扣/优惠券/邀请等"), + ("P2(绿色)", "辅助功能,建议测试。日志/队列/IAP 等"), + ("", ""), + ("测试状态", "填写规范"), + ("Pass", "用例通过"), + ("Fail", "用例失败,需记录实际结果"), + ("Block", "用例被阻塞(依赖功能未就绪)"), + ("Skip", "本轮跳过"), + ("", ""), + ("Sheet 说明", ""), + ("Sheet1 订单核心流程", "27 条:预创建/购买/激活全流程"), + ("Sheet2 用户模块", "16 条:注册/登录/注销/邀请/家庭组"), + ("Sheet3 订阅套餐", "6 条:库存/折扣/限额"), + ("Sheet4 支付与优惠券", "8 条:手续费/优惠券各类型"), + ("Sheet5 IAP苹果内购", "4 条:内购/对账"), + ("Sheet6 日志与缓存", "6 条:日志写入/缓存清理"), + ("Sheet7 队列任务", "5 条:队列任务验证"), + ] + + for r_idx, (key, val) in enumerate(legend_data, start=2): + c1 = ws.cell(row=r_idx, column=1, value=key) + c2 = ws.cell(row=r_idx, column=2, value=val) + for c in (c1, c2): + c.font = Font(name="微软雅黑", size=9) + c.alignment = Alignment(vertical="center", wrap_text=True) + c.border = thin_border() + if key in 
("项目", "优先级", "测试状态", "Sheet 说明"): + for c in (c1, c2): + c.font = Font(name="微软雅黑", bold=True, size=9, color=C_HEADER_FONT) + c.fill = header_fill(C_HEADER_BG) + ws.row_dimensions[r_idx].height = 18 + + +def main(): + os.makedirs(os.path.dirname(os.path.abspath(OUTPUT_PATH)), exist_ok=True) + wb = Workbook() + wb.remove(wb.active) # remove default sheet + + write_legend_sheet(wb) + for sheet_def in ALL_SHEETS: + write_sheet(wb, sheet_def) + + wb.save(OUTPUT_PATH) + print(f"Excel saved: {os.path.abspath(OUTPUT_PATH)}") + + +if __name__ == "__main__": + main() diff --git a/scripts/migrate_paid_users.go b/scripts/migrate_paid_users.go index 4718b20..a7f72d3 100644 --- a/scripts/migrate_paid_users.go +++ b/scripts/migrate_paid_users.go @@ -276,13 +276,13 @@ func main() { SELECT user_id AS uid FROM ` + "`order`" + ` WHERE status = ? AND user_id > 0 UNION SELECT user_id AS uid FROM apple_iap_transactions WHERE user_id > 0 + UNION + SELECT user_id AS uid FROM user_subscribe WHERE user_id > 0 AND (expire_time IS NULL OR expire_time > NOW()) ) t - INNER JOIN user_subscribe s ON s.user_id = t.uid INNER JOIN user u ON u.id = t.uid - WHERE (s.expire_time IS NULL OR s.expire_time > NOW()) - AND u.id NOT IN ( + WHERE u.id NOT IN ( SELECT user_id FROM user_auth_methods WHERE auth_type = 'email' AND auth_identifier = 'devneeds52@gmail.com' - ) + ) ORDER BY t.uid `, orderStatusCompleted).Scan(&paidIDs).Error if err != nil { diff --git a/scripts/test_device_login.go b/scripts/test_device_login.go new file mode 100644 index 0000000..4f77aa6 --- /dev/null +++ b/scripts/test_device_login.go @@ -0,0 +1,295 @@ +package main + +// 设备登录测试脚本 +// 用法: go run scripts/test_device_login.go +// 功能: 模拟客户端设备登录,自动加密请求体,解密响应,打印 token + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/forgoer/openssl" +) + +// ==================== 配置区域 
==================== + +const ( + serverURL = "https://tapi.hifast.biz" // 服务地址 + securitySecret = "c0qhq99a-nq8h-ropg-wrlc-ezj4dlkxqpzx" // device.security_secret + identifier = "test-device-script-001" // 设备唯一标识 + userAgent = "TestScript/1.0" // UserAgent + + appId = "android-client" // AppSignature.AppSecrets 中的 key + appSecret = "uB4G,XxL2{7b" // AppSignature.AppSecrets 中的 value +) + +// ==================== AES 工具(与服务端 pkg/aes/aes.go 一致)==================== + +func generateKey(key string) []byte { + hash := sha256.Sum256([]byte(key)) + return hash[:32] +} + +func generateIv(iv, key string) []byte { + h := md5.New() + h.Write([]byte(iv)) + return generateKey(hex.EncodeToString(h.Sum(nil)) + key) +} + +func aesEncrypt(plainText []byte, keyStr string) (data string, nonce string, err error) { + nonce = fmt.Sprintf("%x", time.Now().UnixNano()) + key := generateKey(keyStr) + iv := generateIv(nonce, keyStr) + dst, err := openssl.AesCBCEncrypt(plainText, key, iv, openssl.PKCS7_PADDING) + if err != nil { + return "", "", err + } + return base64.StdEncoding.EncodeToString(dst), nonce, nil +} + +func aesDecrypt(cipherText string, keyStr string, ivStr string) (string, error) { + decode, err := base64.StdEncoding.DecodeString(cipherText) + if err != nil { + return "", err + } + key := generateKey(keyStr) + iv := generateIv(ivStr, keyStr) + dst, err := openssl.AesCBCDecrypt(decode, key, iv, openssl.PKCS7_PADDING) + return string(dst), err +} + +// ==================== 签名工具(与服务端 pkg/signature 一致)==================== + +func buildStringToSign(method, path, rawQuery string, body []byte, xAppId, timestamp, nonce string) string { + canonical := canonicalQuery(rawQuery) + bodyHash := sha256Hex(body) + parts := []string{ + strings.ToUpper(method), + path, + canonical, + bodyHash, + xAppId, + timestamp, + nonce, + } + return strings.Join(parts, "\n") +} + +func canonicalQuery(rawQuery string) string { + if rawQuery == "" { + return "" + } + pairs := strings.Split(rawQuery, "&") + 
sort.Strings(pairs) + return strings.Join(pairs, "&") +} + +func sha256Hex(data []byte) string { + h := sha256.Sum256(data) + return fmt.Sprintf("%x", h) +} + +func buildSignature(stringToSign, secret string) string { + mac := hmac.New(sha256.New, []byte(secret)) + mac.Write([]byte(stringToSign)) + return hex.EncodeToString(mac.Sum(nil)) +} + +func signedRequest(method, url, rawQuery string, body []byte, token string) (*http.Request, error) { + var bodyReader io.Reader + if body != nil { + bodyReader = bytes.NewReader(body) + } + req, err := http.NewRequest(method, url, bodyReader) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + if token != "" { + req.Header.Set("Authorization", token) // 不带 Bearer 前缀,服务端直接 Parse token + } + + timestamp := strconv.FormatInt(time.Now().Unix(), 10) + nonce := fmt.Sprintf("%x", time.Now().UnixNano()) + + // 提取 path + path := req.URL.Path + + sts := buildStringToSign(method, path, rawQuery, body, appId, timestamp, nonce) + sig := buildSignature(sts, appSecret) + + req.Header.Set("X-App-Id", appId) + req.Header.Set("X-Timestamp", timestamp) + req.Header.Set("X-Nonce", nonce) + req.Header.Set("X-Signature", sig) + return req, nil +} + +// ==================== 主逻辑 ==================== + +func main() { + fmt.Println("=== 设备登录测试 ===") + fmt.Printf("Server: %s\n", serverURL) + fmt.Printf("Identifier: %s\n", identifier) + fmt.Println() + + // 1. 构造原始请求体 + payload := map[string]string{ + "identifier": identifier, + "user_agent": userAgent, + } + plainBytes, err := json.Marshal(payload) + if err != nil { + fmt.Printf("[ERROR] marshal payload: %v\n", err) + return + } + fmt.Printf("原始请求体: %s\n", string(plainBytes)) + + // 2. 
AES 加密请求体 + encData, nonce, err := aesEncrypt(plainBytes, securitySecret) + if err != nil { + fmt.Printf("[ERROR] encrypt: %v\n", err) + return + } + + encBody := map[string]string{ + "data": encData, + "time": nonce, + } + encBytes, _ := json.Marshal(encBody) + fmt.Printf("加密请求体: %s\n\n", string(encBytes)) + + // 3. 发送请求 + req, err := signedRequest("POST", serverURL+"/v1/auth/login/device", "", encBytes, "") + if err != nil { + fmt.Printf("[ERROR] new request: %v\n", err) + return + } + req.Header.Set("Login-Type", "device") + + client := &http.Client{Timeout: 10 * time.Second} + resp, err := client.Do(req) + if err != nil { + fmt.Printf("[ERROR] request failed: %v\n", err) + return + } + defer resp.Body.Close() + + respBody, _ := io.ReadAll(resp.Body) + fmt.Printf("HTTP Status: %d\n", resp.StatusCode) + fmt.Printf("原始响应: %s\n\n", string(respBody)) + + // 4. 解密响应 + // 响应格式: {"code":200,"data":{"data":"","time":""},"message":""} + var outer struct { + Code int `json:"code"` + Message string `json:"message"` + Data json.RawMessage `json:"data"` + } + if err := json.Unmarshal(respBody, &outer); err != nil { + fmt.Printf("[ERROR] parse response: %v\n", err) + return + } + + if outer.Code != 200 { + fmt.Printf("[FAIL] 登录失败: code=%d message=%s\n", outer.Code, outer.Message) + return + } + + // data 字段是加密对象 + var encResp struct { + Data string `json:"data"` + Time string `json:"time"` + } + if err := json.Unmarshal(outer.Data, &encResp); err != nil { + // 如果 Device.Enable=false,data 直接就是明文对象 + fmt.Printf("响应 data 非加密格式,直接解析: %s\n", string(outer.Data)) + var loginResp struct { + Token string `json:"token"` + } + if err2 := json.Unmarshal(outer.Data, &loginResp); err2 == nil && loginResp.Token != "" { + fmt.Printf("[OK] Token: %s\n", loginResp.Token) + } + return + } + + decrypted, err := aesDecrypt(encResp.Data, securitySecret, encResp.Time) + if err != nil { + fmt.Printf("[ERROR] decrypt response: %v\n", err) + return + } + fmt.Printf("解密后响应: %s\n\n", decrypted) + + var 
loginResp struct { + Token string `json:"token"` + } + if err := json.Unmarshal([]byte(decrypted), &loginResp); err != nil { + fmt.Printf("[ERROR] parse decrypted: %v\n", err) + return + } + + fmt.Printf("[OK] Token: %s\n", loginResp.Token) + + // 5. 用 token 请求订阅列表 + fmt.Println("\n=== 请求订阅列表 ===") + subReq, err := signedRequest("GET", serverURL+"/v1/public/subscribe/list", "", nil, loginResp.Token) + if err != nil { + fmt.Printf("[ERROR] build subscribe request: %v\n", err) + return + } + subReq.Header.Set("Login-Type", "device") + + subResp, err := client.Do(subReq) + if err != nil { + fmt.Printf("[ERROR] subscribe list request: %v\n", err) + return + } + defer subResp.Body.Close() + + subBody, _ := io.ReadAll(subResp.Body) + fmt.Printf("HTTP Status: %d\n", subResp.StatusCode) + fmt.Printf("原始响应: %s\n", string(subBody)) + + // 解密订阅列表响应 + var subOuter struct { + Code int `json:"code"` + Message string `json:"message"` + Data json.RawMessage `json:"data"` + } + if err := json.Unmarshal(subBody, &subOuter); err != nil { + fmt.Printf("[ERROR] parse subscribe response: %v\n", err) + return + } + if subOuter.Code != 200 { + fmt.Printf("[FAIL] 订阅列表失败: code=%d message=%s\n", subOuter.Code, subOuter.Message) + return + } + + var subEnc struct { + Data string `json:"data"` + Time string `json:"time"` + } + if err := json.Unmarshal(subOuter.Data, &subEnc); err != nil || subEnc.Data == "" { + // 无加密,直接打印 + fmt.Printf("\n[OK] 订阅列表(明文): %s\n", string(subOuter.Data)) + return + } + subDecrypted, err := aesDecrypt(subEnc.Data, securitySecret, subEnc.Time) + if err != nil { + fmt.Printf("[ERROR] decrypt subscribe list: %v\n", err) + return + } + fmt.Printf("\n[OK] 订阅列表(解密): %s\n", subDecrypted) +} diff --git a/test_device_login b/test_device_login new file mode 100755 index 0000000..d7d1a1a Binary files /dev/null and b/test_device_login differ diff --git a/说明文档.md b/说明文档.md index a7c9b27..4b2f60b 100644 --- a/说明文档.md +++ b/说明文档.md @@ -25,7 +25,7 @@ certbot certonly --manual 
--preferred-challenges dns -d airoport.win -d "*.airop | docker exec -i ppanel-mysql mysql -uroot -pjpcV41ppanel -go run scripts/migrate_paid_users.go -src 'root:rootpassword@tcp(127.0.0.1:3306)/ppanel?charset=utf8mb4&parseTime=True&loc=Local' -dst 'root:jpcV41ppanel@tcp(154.12.35.103:3306)/ppanel?charset=utf8mb4&parseTime=True&loc=Local' -clean +go run scripts/migrate_paid_users.go -src 'root:rootpassword@tcp(127.0.0.1:3306)/ppanel?charset=utf8mb4&parseTime=True&loc=Local' -dst 'root:jpcV41ppanel@tcp(103.150.215.44:3306)/hifast?charset=utf8mb4&parseTime=True&loc=Local' -clean