Skip to main content

Production Checklist

Security

Prototype protection, input validation

Performance

Caching enabled, metrics monitoring

Error Handling

Graceful failures, logging

Monitoring

Metrics, alerts, dashboards

Production Configuration

import { createRuleEngine } from 'rule-engine-js';

// Hardened production settings: locked-down security limits, caching
// enabled, debug output silenced. Kept in a named object so the same
// options can be reused or unit-tested independently of the engine.
const productionOptions = {
  // Security
  strict: true,                  // Strict type checking
  allowPrototypeAccess: false,   // Block prototype pollution
  maxDepth: 10,                  // Limit nesting
  maxOperators: 100,             // Limit complexity

  // Performance
  enableCache: true,
  maxCacheSize: 2000,            // Increase for production

  // Monitoring
  enableDebug: false             // Disable debug logs
};

const engine = createRuleEngine(productionOptions);

Environment Variables

# .env
NODE_ENV=production

# Rule Engine
RULE_ENGINE_MAX_DEPTH=10
RULE_ENGINE_MAX_CACHE=2000
RULE_ENGINE_STRICT=true

# Monitoring
METRICS_ENABLED=true
LOG_LEVEL=info
// Build the engine from environment variables so the same code runs in
// every environment, falling back to safe production defaults when a
// variable is unset or not a number.
// Note: `|| fallback` deliberately also rejects NaN and 0 — a depth or
// cache size of 0 would effectively disable the engine.
const engine = createRuleEngine({
  strict: process.env.RULE_ENGINE_STRICT === 'true',
  // Always pass an explicit radix 10 to parseInt.
  maxDepth: Number.parseInt(process.env.RULE_ENGINE_MAX_DEPTH, 10) || 10,
  maxCacheSize: Number.parseInt(process.env.RULE_ENGINE_MAX_CACHE, 10) || 2000
});

Singleton Pattern

Create one engine instance, reuse everywhere.
// engine.js — process-wide singleton holding the shared rule engine.
import { createRuleEngine } from 'rule-engine-js';

let engineInstance = null;

/**
 * Lazily create and return the shared engine instance.
 * Every caller in the process receives the same configured engine,
 * so compiled-rule caches are shared across the whole application.
 */
export function getEngine() {
  if (engineInstance === null) {
    engineInstance = createRuleEngine({
      strict: true,
      maxDepth: 10,
      enableCache: true,
      maxCacheSize: 2000
    });
  }
  return engineInstance;
}

// Usage in other files — never call createRuleEngine directly;
// always go through the singleton so caches and config stay shared.
import { getEngine } from './engine';

const engine = getEngine();
// `rule` and `data` come from the caller's context (e.g. a request body).
const result = engine.evaluateExpr(rule, data);

Error Handling

Wrap evaluations in try-catch.
/**
 * Evaluate a rule against a context without ever leaking internals.
 * Failures are logged with full detail, but callers only receive a
 * generic error message.
 *
 * @param {object} rule    - the rule expression to evaluate
 * @param {object} context - data the rule is evaluated against
 * @returns {object} the engine result, or a sanitized failure object
 */
function evaluateRule(rule, context) {
  try {
    const result = engine.evaluateExpr(rule, context);

    if (result.success) {
      return result;
    }

    // Expected failure path: log detail, return a generic message.
    logger.warn('Rule failed', { rule, error: result.error });
    return { success: false, error: 'Rule evaluation failed' };
  } catch (err) {
    // Unexpected failure path: never expose err.message to callers.
    logger.error('Rule error', { error: err.message, rule });
    return { success: false, error: 'Internal error' };
  }
}

Logging

import winston from 'winston';

// Structured JSON file logging; verbosity is tunable via LOG_LEVEL.
// Errors get their own file in addition to the combined stream.
const fileTransports = [
  new winston.transports.File({ filename: 'error.log', level: 'error' }),
  new winston.transports.File({ filename: 'combined.log' })
];

const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: winston.format.json(),
  transports: fileTransports
});

// Log rule evaluations
function logEvaluation(ruleId, result, context) {
  logger.info('Rule evaluated', {
    ruleId,
    success: result.success,
    duration: result.duration,
    timestamp: new Date().toISOString()
  });
}

Monitoring

Metrics Endpoint

// Express example
app.get('/metrics', (req, res) => {
  const metrics = engine.getMetrics();
  const cacheStats = engine.getCacheStats();

  res.json({
    evaluations: {
      total: metrics.evaluations,
      errors: metrics.errors,
      avgTime: metrics.avgTime.toFixed(2) + 'ms'
    },
    cache: {
      hitRate: (metrics.cacheHits / metrics.evaluations * 100).toFixed(1) + '%',
      size: cacheStats.expression?.size || 0,
      maxSize: cacheStats.expression?.maxSize || 0
    }
  });
});

Health Check

// GET /health — liveness/readiness probe target.
app.get('/health', (req, res) => {
  const metrics = engine.getMetrics();

  // With zero evaluations, errors/evaluations is NaN; NaN fails every
  // comparison, so a fresh idle instance would report 'degraded' and
  // probes would kill it. Treat an idle engine as a 0% error rate.
  const errorRate = metrics.evaluations > 0
    ? metrics.errors / metrics.evaluations
    : 0;

  const isHealthy =
    metrics.avgTime < 10 &&   // < 10ms average evaluation time
    errorRate < 0.01;         // < 1% error rate

  res.status(isHealthy ? 200 : 503).json({
    status: isHealthy ? 'healthy' : 'degraded',
    metrics
  });
});

Docker Deployment

# Dockerfile
FROM node:18-alpine

WORKDIR /app

# Copy manifests first so dependency install is layer-cached.
COPY package*.json ./
# Install production deps from the lockfile; --omit=dev replaces the
# deprecated --only=production flag (npm 8+).
RUN npm ci --omit=dev

COPY . .

ENV NODE_ENV=production
EXPOSE 3000

CMD ["node", "server.js"]
# docker-compose.yml
# NOTE(review): the top-level `version` key is obsolete in Compose v2
# (it is ignored with a warning) — confirm whether legacy v1 support
# is still required before removing it.
version: '3.8'

services:
  app:
    build: .
    ports:
      - "3000:3000"  # host:container
    environment:
      - NODE_ENV=production
      - RULE_ENGINE_MAX_DEPTH=10
      - RULE_ENGINE_MAX_CACHE=2000
    restart: unless-stopped  # auto-restart on crash, but not after manual stop

Load Balancing

// pm2 ecosystem.config.js
// Run one worker per CPU core in cluster mode: pm2 load-balances
// incoming connections across workers and restarts crashed ones.
module.exports = {
  apps: [{
    name: 'rule-engine-app',
    script: 'server.js',
    instances: 'max',  // Use all CPU cores
    exec_mode: 'cluster',
    env: {
      NODE_ENV: 'production'
    }
  }]
};

Caching Strategy

Redis for Distributed Cache

import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL);

/**
 * Evaluate a rule with a Redis-backed result cache shared across all
 * application instances.
 *
 * @param {string} ruleId  - stable rule identifier (part of the cache key)
 * @param {object} rule    - the rule expression to evaluate
 * @param {object} context - data the rule is evaluated against
 * @returns {Promise<object>} the evaluation result
 */
async function evaluateWithCache(ruleId, rule, context) {
  // NOTE: JSON.stringify is key-order sensitive; identical contexts with
  // different key order will miss the cache (safe, just slower).
  const cacheKey = `rule:${ruleId}:${JSON.stringify(context)}`;

  // Check Redis cache
  const cached = await redis.get(cacheKey);
  if (cached) {
    return JSON.parse(cached);
  }

  // Evaluate
  const result = engine.evaluateExpr(rule, context);

  // Only cache successful evaluations (5 min TTL). Caching a transient
  // failure would keep serving that error until the entry expires.
  if (result.success) {
    await redis.setex(cacheKey, 300, JSON.stringify(result));
  }

  return result;
}

Rate Limiting

import rateLimit from 'express-rate-limit';

// Throttle evaluation requests to protect the engine from abuse.
const limiter = rateLimit({
  windowMs: 60 * 1000,  // 1 minute
  max: 100,             // 100 requests per minute
  message: 'Too many requests',
  standardHeaders: true,  // send standard RateLimit-* response headers
  legacyHeaders: false    // omit deprecated X-RateLimit-* headers
});

app.post('/evaluate', limiter, (req, res) => {
  // Handle rule evaluation
});

Graceful Shutdown

// Shut down cleanly on SIGTERM (sent by Docker/Kubernetes on stop).
process.on('SIGTERM', async () => {
  logger.info('SIGTERM received, shutting down gracefully');

  // Clear caches
  engine.clearCache();

  // Stop accepting new connections; exit once in-flight requests finish.
  server.close(() => {
    logger.info('Server closed');
    process.exit(0);
  });

  // Safety net: open keep-alive connections can keep close() from ever
  // completing, so force-exit after 10s. unref() lets the process exit
  // immediately if close() finishes first.
  setTimeout(() => {
    logger.error('Forced shutdown: server did not close in time');
    process.exit(1);
  }, 10000).unref();
});

Kubernetes Deployment

# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rule-engine
spec:
  replicas: 3  # three pods for availability across node failures
  selector:
    matchLabels:
      app: rule-engine
  template:
    metadata:
      labels:
        app: rule-engine
    spec:
      containers:
      - name: rule-engine
        image: rule-engine:latest
        ports:
        - containerPort: 3000
        env:
        - name: NODE_ENV
          value: "production"
        - name: RULE_ENGINE_MAX_CACHE
          value: "2000"
        # Requests drive scheduling decisions; limits cap runaway usage.
        resources:
          requests:
            memory: "256Mi"
            cpu: "250m"
          limits:
            memory: "512Mi"
            cpu: "500m"
        # Liveness: restart the container when /health stops returning 2xx.
        livenessProbe:
          httpGet:
            path: /health
            port: 3000
          initialDelaySeconds: 30
          periodSeconds: 10
        # Readiness: remove the pod from Service endpoints while unhealthy.
        readinessProbe:
          httpGet:
            path: /health
            port: 3000
          initialDelaySeconds: 5
          periodSeconds: 5

Best Practices

One engine instance per process.
Increase the cache size for production traffic (e.g. `maxCacheSize: 2000`) so frequently evaluated rules stay cached.
Track evaluations, errors, cache hit rate.
Never expose internal errors to users.
Prevent abuse with rate limiting.
Implement /health endpoint.
Use JSON logs for easy parsing.
Use load balancers and multiple instances.

Production Checklist

  • Security config verified
  • Environment variables set
  • Singleton pattern implemented
  • Error handling in place
  • Logging configured
  • Metrics endpoint added
  • Health check endpoint added
  • Rate limiting enabled
  • Graceful shutdown implemented
  • Load tested
  • Monitoring dashboards created
  • Alerts configured