# Production Deployment

A guide to deploying the marketplace platform to a production environment.

## Preparing for Deployment

### Infrastructure Requirements
- Server: at least 4 CPU cores, 8 GB RAM, 100 GB SSD
- Database: PostgreSQL 14+ with replication
- Cache: a Redis cluster for high availability
- CDN: for static assets and images
- Load balancer: for distributing traffic
### Environment Variables

```bash
# Production settings
NODE_ENV=production
DATABASE_URL=postgresql://user:pass@host:5432/dbname
REDIS_URL=redis://redis-host:6379
CDN_URL=https://cdn.example.com

# Security
JWT_SECRET=your-secure-jwt-secret
ENCRYPTION_KEY=your-encryption-key
SESSION_SECRET=your-session-secret

# External services
SMTP_HOST=smtp.example.com
SMTP_USER=notifications@example.com
SMTP_PASS=smtp-password

# Payment gateways
STRIPE_SECRET_KEY=sk_live_...
PAYPAL_CLIENT_ID=live_client_id
```
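It is worth validating these variables when the app boots. A minimal sketch (a hypothetical `config.js`; the variable names come from the block above) that fails fast when something required is missing, instead of crashing later at first use:

```javascript
// config.js — refuse to boot with an incomplete production configuration.
const REQUIRED = [
  'DATABASE_URL',
  'REDIS_URL',
  'JWT_SECRET',
  'SESSION_SECRET',
  'STRIPE_SECRET_KEY',
];

const missing = REQUIRED.filter((name) => !process.env[name]);
if (missing.length > 0) {
  console.error(`Missing required environment variables: ${missing.join(', ')}`);
  process.exit(1);
}

module.exports = {
  databaseUrl: process.env.DATABASE_URL,
  redisUrl: process.env.REDIS_URL,
  cdnUrl: process.env.CDN_URL || '',
};
```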
## Deploying with Docker

### Docker Compose for Production

```yaml
version: '3.8'

services:
  app:
    image: marketplace-platform:latest
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=${REDIS_URL}
    depends_on:
      - db
      - redis
    restart: unless-stopped

  db:
    image: postgres:14
    environment:
      POSTGRES_DB: marketplace
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped

  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    restart: unless-stopped

  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
      - ./ssl:/etc/nginx/ssl
    depends_on:
      - app
    restart: unless-stopped

volumes:
  postgres_data:
  redis_data:
```
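On `docker-compose stop` or a redeploy, Docker sends the app container SIGTERM and, after a grace period, SIGKILL. A sketch of handling this in the Node app (assuming an Express `app`) so in-flight requests finish before the process exits:

```javascript
// `server` is the http.Server returned by app.listen.
const server = app.listen(3000, () => {
  console.log('Listening on 3000');
});

process.on('SIGTERM', () => {
  console.log('SIGTERM received, draining connections');
  // Stop accepting new connections; exit once in-flight requests finish.
  server.close(() => process.exit(0));
  // Safety net: force exit if connections do not drain in time.
  setTimeout(() => process.exit(1), 10000).unref();
});
```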
### Nginx Configuration

```nginx
upstream app {
    server app:3000;
}

server {
    listen 80;
    server_name example.com;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    server_name example.com;

    ssl_certificate /etc/nginx/ssl/cert.pem;
    ssl_certificate_key /etc/nginx/ssl/key.pem;

    # Security headers
    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection "1; mode=block";

    location / {
        proxy_pass http://app;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Static files
    location /static/ {
        alias /var/www/static/;
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
}
```
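Since Nginx terminates TLS and passes the client address in the `X-Forwarded-*` headers set above, the Express app must be told to trust its proxy; otherwise `req.ip` and `req.secure` describe the Nginx container rather than the client:

```javascript
const express = require('express');
const app = express();

// Trust the first proxy hop (the Nginx container), so req.ip is taken
// from X-Forwarded-For and req.secure from X-Forwarded-Proto.
app.set('trust proxy', 1);

app.get('/whoami', (req, res) => {
  res.json({ ip: req.ip, secure: req.secure });
});
```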
## CI/CD Pipeline

### GitHub Actions

```yaml
name: Deploy to Production

on:
  push:
    branches: [main]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Build Docker image
        run: |
          docker build -t marketplace-platform:${{ github.sha }} .
          docker tag marketplace-platform:${{ github.sha }} marketplace-platform:latest

      - name: Push to registry
        run: |
          echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
          docker push marketplace-platform:latest

      - name: Deploy to server
        uses: appleboy/ssh-action@v0.1.5
        with:
          host: ${{ secrets.HOST }}
          username: ${{ secrets.USERNAME }}
          key: ${{ secrets.SSH_KEY }}
          script: |
            cd /opt/marketplace
            docker-compose pull
            docker-compose up -d
            docker system prune -f
```
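A post-deploy smoke test is a useful final pipeline step. A sketch (a hypothetical `smoke-test.js`, assuming Node 18+ for the global `fetch` and the `/health` endpoint described below) that fails the pipeline if the new release does not come up:

```javascript
// smoke-test.js — poll the health endpoint until the new release responds.
const APP_URL = process.env.APP_URL || 'https://example.com';

async function waitForHealthy(retries = 10, delayMs = 3000) {
  for (let attempt = 1; attempt <= retries; attempt++) {
    try {
      const res = await fetch(`${APP_URL}/health`);
      if (res.ok) {
        console.log(`Healthy after ${attempt} attempt(s)`);
        return;
      }
    } catch (err) {
      // Connection refused while containers restart — keep retrying.
    }
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
  console.error('Deployment did not become healthy in time');
  process.exit(1);
}

waitForHealthy();
```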
## Monitoring and Logging

### Structured Logging

```javascript
const winston = require('winston');

const logger = winston.createLogger({
  level: 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.errors({ stack: true }),
    winston.format.json()
  ),
  transports: [
    new winston.transports.File({ filename: 'error.log', level: 'error' }),
    new winston.transports.File({ filename: 'combined.log' }),
  ],
});

if (process.env.NODE_ENV !== 'production') {
  logger.add(new winston.transports.Console({
    format: winston.format.simple()
  }));
}
```
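Usage example: wiring the logger into Express as request-logging middleware (a sketch, assuming the `app` and `logger` above), so every request produces one structured entry:

```javascript
// Log one structured entry per request, including status and latency.
app.use((req, res, next) => {
  const start = Date.now();
  res.on('finish', () => {
    logger.info('http_request', {
      method: req.method,
      url: req.originalUrl,
      status: res.statusCode,
      durationMs: Date.now() - start,
    });
  });
  next();
});
```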
### Health Checks

```javascript
app.get('/health', (req, res) => {
  const health = {
    status: 'ok',
    timestamp: Date.now(),
    uptime: process.uptime(),
    memory: process.memoryUsage(),
    database: 'connected', // Check DB connection
    redis: 'connected'     // Check Redis connection
  };
  res.status(200).json(health);
});
```
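The hardcoded `'connected'` values above are placeholders; real probes should back them. A sketch assuming a `pg` connection pool (`pool`) and a node-redis v4 client (`redisClient`) are already created elsewhere in the app:

```javascript
app.get('/health', async (req, res) => {
  const health = {
    status: 'ok',
    timestamp: Date.now(),
    uptime: process.uptime(),
    memory: process.memoryUsage(),
    database: 'connected',
    redis: 'connected',
  };

  try {
    await pool.query('SELECT 1'); // pg: cheap connectivity probe
  } catch (err) {
    health.database = 'disconnected';
    health.status = 'degraded';
  }

  try {
    await redisClient.ping(); // node-redis v4: sends PING
  } catch (err) {
    health.redis = 'disconnected';
    health.status = 'degraded';
  }

  // 503 lets the load balancer take an unhealthy instance out of rotation.
  res.status(health.status === 'ok' ? 200 : 503).json(health);
});
```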
## Backup Strategy

### Automated Database Backups

```bash
#!/bin/bash
# backup.sh

BACKUP_DIR="/backups"
DB_NAME="marketplace"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)

# Create backup
pg_dump "$DATABASE_URL" > "$BACKUP_DIR/backup_$TIMESTAMP.sql"

# Compress backup
gzip "$BACKUP_DIR/backup_$TIMESTAMP.sql"

# Remove old backups (keep 30 days)
find "$BACKUP_DIR" -name "backup_*.sql.gz" -mtime +30 -delete

# Upload to S3 (optional)
aws s3 cp "$BACKUP_DIR/backup_$TIMESTAMP.sql.gz" s3://your-backup-bucket/
```
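Schedule the script with cron (for example, `0 3 * * * /opt/marketplace/backup.sh` for a nightly run at 03:00), and periodically test restores, e.g. `gunzip -c backup.sql.gz | psql "$DATABASE_URL"` against a staging database: a backup that has never been restored is not a backup.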