I'll suggest a comprehensive project that incorporates all these technologies. Let's build a Real-time Order Processing System for E-commerce.
Architecture overview:

```
User Browser → Load Balancer → API Gateway → Microservices →    Kafka    → Monitoring Dashboard
                     ↓              ↓              ↓               ↓               ↓
                  AWS ALB        ECS/EKS       Multiple      MSK/Kinesis     CloudWatch
                                               Containers                    Prometheus
```
Project setup:

```bash
# Create main project structure
mkdir ecommerce-microservices
cd ecommerce-microservices
# Initialize monorepo with multiple services
mkdir -p services/{order,product,user,notification}
mkdir -p load-balancer monitoring kafka-config cicd
mkdir -p infrastructure/aws
# Initialize each service
cd services/order
npx create-react-app frontend --template typescript
cd ../..
```

Order Service (Node.js + Express):

```js
// services/order/server.js
const express = require('express');
const { Kafka } = require('kafkajs');
const promBundle = require('express-prom-bundle');
const app = express();

// Parse JSON request bodies (required for req.body below)
app.use(express.json());

// Exposes default HTTP metrics at /metrics for Prometheus
const metricsMiddleware = promBundle({ includeMethod: true });
app.use(metricsMiddleware);
const kafka = new Kafka({
  clientId: 'order-service',
  // Support comma-separated broker lists from the environment
  brokers: (process.env.KAFKA_BROKERS || 'localhost:9092').split(',')
});
const producer = kafka.producer();
app.post('/api/orders', async (req, res) => {
  const order = req.body;
  try {
    // Publish the order to Kafka for asynchronous processing
    await producer.send({
      topic: 'orders',
      messages: [{ value: JSON.stringify(order) }]
    });
    res.json({ status: 'processing', orderId: order.id });
  } catch (err) {
    console.error('Failed to publish order', err);
    res.status(503).json({ status: 'error', message: 'Order could not be queued' });
  }
});
// Health check endpoint for load balancer
app.get('/health', (req, res) => {
res.json({ status: 'healthy', service: 'order' });
});
// Connect the producer before accepting traffic
producer.connect().then(() => {
  app.listen(3001, () => console.log('Order Service running on port 3001'));
});
```

Product Service and User Service follow the same structure (different ports and topics).
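The Notification Service sits on the consuming side of the same flow. A minimal sketch, assuming a `notification-service` that reads the `orders` topic (the group id and notification logic are illustrative):

```js
// services/notification/server.js (sketch)
const { Kafka } = require('kafkajs');

const kafka = new Kafka({
  clientId: 'notification-service',
  brokers: (process.env.KAFKA_BROKERS || 'localhost:9092').split(',')
});

const consumer = kafka.consumer({ groupId: 'notification-group' });

async function run() {
  await consumer.connect();
  await consumer.subscribe({ topic: 'orders', fromBeginning: false });
  await consumer.run({
    eachMessage: async ({ message }) => {
      const order = JSON.parse(message.value.toString());
      // Placeholder: send email/SMS/push here
      console.log(`Notify customer: order ${order.id} is processing`);
    }
  });
}

run().catch(console.error);
```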
Frontend (React):

```js
// services/order/frontend/src/App.js
import React, { useState } from 'react';
import axios from 'axios';
function App() {
const [order, setOrder] = useState({});
const createOrder = async () => {
const response = await axios.post(
`${process.env.REACT_APP_API_URL}/api/orders`,
{ productId: '123', quantity: 2 }
);
setOrder(response.data);
};
return (
<div>
<button onClick={createOrder}>Create Order</button>
<div>Order Status: {order.status}</div>
</div>
);
}
export default App;
```
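The response above only confirms the order was queued; the final status arrives asynchronously. A hypothetical polling sketch, assuming the order service also exposes a `GET /api/orders/:id` status endpoint (not defined above):

```js
// Poll the (assumed) status endpoint until the order leaves 'processing'
async function pollOrderStatus(orderId, intervalMs = 2000) {
  const url = `${process.env.REACT_APP_API_URL}/api/orders/${orderId}`;
  for (;;) {
    const { data } = await axios.get(url);
    if (data.status !== 'processing') return data;
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
}
```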
Dockerfile (order service):

```dockerfile
# services/order/Dockerfile
FROM node:16-alpine
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3001
CMD ["node", "server.js"]# docker-compose.yml
version: '3.8'
services:
zookeeper:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
kafka:
image: confluentinc/cp-kafka:latest
depends_on:
- zookeeper
ports:
- "9092:9092"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # Advertise the in-network hostname so other containers can reach the broker
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
      # Required when running a single broker
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
order-service:
build: ./services/order
ports:
- "3001:3001"
environment:
KAFKA_BROKERS: kafka:9092
nginx:
image: nginx:alpine
ports:
- "80:80"
volumes:
      - ./load-balancer/nginx.conf:/etc/nginx/nginx.conf
```

Nginx load balancer configuration:

```nginx
# load-balancer/nginx.conf
# An events block is required even if empty
events {}

http {
upstream order_services {
server order-service-1:3001;
server order-service-2:3001;
server order-service-3:3001;
}
upstream product_services {
server product-service-1:3002;
server product-service-2:3002;
}
server {
listen 80;
    location /api/orders {
        proxy_pass http://order_services;
        # The active health_check directive is NGINX Plus only; open-source
        # nginx uses passive checks (max_fails/fail_timeout on upstream servers)
    }
    location /api/products {
        proxy_pass http://product_services;
    }
location /health {
return 200 "healthy";
}
}
}
```

Each microservice must have a health check endpoint:

```js
app.get('/health', (req, res) => {
const healthcheck = {
uptime: process.uptime(),
message: 'OK',
timestamp: Date.now()
};
res.status(200).send(healthcheck);
});
```
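Liveness alone will not catch a lost broker connection. A hedged sketch of a separate readiness probe, assuming the kafkajs producer from the order service is in scope (the `isReady` flag is illustrative, not a kafkajs API):

```js
// Track broker connectivity via kafkajs instrumentation events
let isReady = false;
producer.on(producer.events.CONNECT, () => { isReady = true; });
producer.on(producer.events.DISCONNECT, () => { isReady = false; });

// Readiness endpoint: load balancers should route traffic only when ready
app.get('/ready', (req, res) => {
  res.status(isReady ? 200 : 503).json({ ready: isReady });
});
```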
Kafka producer wrapper:

```js
// services/kafka-producer/index.js
const { Kafka } = require('kafkajs');
class KafkaProducer {
constructor() {
this.kafka = new Kafka({
clientId: 'ecommerce-producer',
brokers: [process.env.KAFKA_BROKERS]
});
this.producer = this.kafka.producer();
}
  async send(topic, message) {
    // connect() on an already-connected producer is effectively a no-op,
    // but in production connect once at startup and reuse the connection
    await this.producer.connect();
await this.producer.send({
topic,
messages: [{ value: JSON.stringify(message) }]
});
}
}
module.exports = KafkaProducer;
```

Kafka consumer wrapper:

```js
// services/kafka-consumer/index.js
const { Kafka } = require('kafkajs');
class KafkaConsumer {
constructor(groupId) {
this.kafka = new Kafka({
clientId: 'ecommerce-consumer',
brokers: [process.env.KAFKA_BROKERS]
});
this.consumer = this.kafka.consumer({ groupId });
}
async subscribe(topic, callback) {
await this.consumer.connect();
await this.consumer.subscribe({ topic, fromBeginning: true });
await this.consumer.run({
eachMessage: async ({ message }) => {
callback(JSON.parse(message.value.toString()));
}
});
}
}

module.exports = KafkaConsumer;
```
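How the two wrappers fit together, as a small hypothetical sketch (the require paths, group id, and handler are illustrative):

```js
// Example wiring: one side publishes, the other consumes
const KafkaProducer = require('./services/kafka-producer');
const KafkaConsumer = require('./services/kafka-consumer');

const producer = new KafkaProducer();
const consumer = new KafkaConsumer('order-events');

async function main() {
  // React to every order event as it arrives
  await consumer.subscribe('orders', (order) => {
    console.log('received order', order.id);
  });

  // Publish a test order
  await producer.send('orders', { id: 'demo-1', quantity: 2 });
}

main().catch(console.error);
```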
Prometheus configuration:

```yaml
# monitoring/prometheus.yml
global:
scrape_interval: 15s
scrape_configs:
- job_name: 'order-service'
static_configs:
- targets: ['order-service:3001']
metrics_path: '/metrics'
- job_name: 'product-service'
static_configs:
- targets: ['product-service:3002']
metrics_path: '/metrics'
- job_name: 'node-exporter'
static_configs:
      - targets: ['node-exporter:9100']
```

Grafana dashboards: create dashboard JSON files for the following (a custom-metric sketch follows the list):
- Service health monitoring
- Request rates and latency
- Error rates
- Kafka message throughput
- Container resource usage
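The request-rate and latency panels assume the services export custom metrics. A minimal sketch using prom-client (the client library behind express-prom-bundle); the metric name and buckets are illustrative:

```js
const client = require('prom-client');

// Histogram for order-processing duration, labelled by outcome
const orderDuration = new client.Histogram({
  name: 'order_processing_duration_seconds',
  help: 'Time spent handling an order',
  labelNames: ['outcome'],
  buckets: [0.05, 0.1, 0.25, 0.5, 1, 2.5]
});

// Usage inside the order handler:
const end = orderDuration.startTimer();
// ... process the order ...
end({ outcome: 'success' });
```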
Terraform (AWS infrastructure):

```hcl
# infrastructure/aws/main.tf
provider "aws" {
region = "us-east-1"
}
# ECS Cluster
resource "aws_ecs_cluster" "ecommerce_cluster" {
name = "ecommerce-cluster"
}
# Load Balancer
resource "aws_lb" "main" {
name = "ecommerce-lb"
internal = false
load_balancer_type = "application"
  security_groups    = [aws_security_group.lb_sg.id]
  # The VPC, subnets, and security groups are assumed to be defined elsewhere
  subnets            = aws_subnet.public[*].id
}
# Target Groups for each service
resource "aws_lb_target_group" "order_service" {
name = "order-service-tg"
port = 3001
protocol = "HTTP"
vpc_id = aws_vpc.main.id
health_check {
path = "/health"
}
}
```

ECS task definition (order service):

```json
{
"family": "order-service",
"networkMode": "awsvpc",
"containerDefinitions": [{
"name": "order-service",
"image": "your-ecr-repo/order-service:latest",
"portMappings": [{
"containerPort": 3001,
"hostPort": 3001
}],
"environment": [
{"name": "KAFKA_BROKERS", "value": "your-msk-brokers"}
]
}]
}
```

GitHub Actions workflow:

```yaml
# .github/workflows/deploy.yml
name: Deploy to ECS
on:
push:
branches: [ main ]
jobs:
build-and-deploy:
runs-on: ubuntu-latest
steps:
      - uses: actions/checkout@v4
- name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- name: Login to Amazon ECR
id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
- name: Build and push Docker image
run: |
docker build -t order-service ./services/order
docker tag order-service:latest ${{ steps.login-ecr.outputs.registry }}/order-service:latest
docker push ${{ steps.login-ecr.outputs.registry }}/order-service:latest
- name: Deploy to ECS
run: |
aws ecs update-service \
--cluster ecommerce-cluster \
--service order-service \
            --force-new-deployment
```

Environment variables:

```bash
# .env.example
KAFKA_BROKERS=your-kafka-brokers:9092
AWS_REGION=us-east-1
ECS_CLUSTER=ecommerce-cluster
MONITORING_ENDPOINT=http://prometheus:9090
```

Deployment script:

```bash
#!/bin/bash
# deploy.sh
# 1. Build all services
docker-compose build
# 2. Run tests
npm test
# 3. Push to ECR
aws ecr get-login-password | docker login --username AWS --password-stdin your-account-id.dkr.ecr.region.amazonaws.com
# 4. Deploy to ECS
aws ecs update-service --cluster ecommerce-cluster --service order-service --force-new-deployment
```

Implementation steps:
1. Local Development:
   - Set up all microservices locally
   - Test with Docker Compose
   - Verify Kafka message flow
2. AWS Setup:
   - Create VPC, subnets, security groups
   - Set up ECR repositories
   - Configure MSK (Managed Streaming for Kafka)
   - Create ECS cluster
3. CI/CD Pipeline:
   - Configure GitHub Secrets
   - Test the pipeline with a dummy deployment
   - Set up a staging environment
4. Monitoring & Alerting:
   - Deploy Prometheus and Grafana
   - Set up CloudWatch alarms
   - Configure Slack/Email alerts
5. Load Testing:
   - Use Apache Bench or k6 (see the sketch below)
   - Test auto-scaling
   - Monitor performance metrics
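The k6 sketch referenced above; a minimal script where the URL, load shape, and thresholds are illustrative:

```js
// load-test.js (run with: k6 run load-test.js)
import http from 'k6/http';
import { check, sleep } from 'k6';

export const options = {
  vus: 50,            // 50 virtual users
  duration: '2m',
  thresholds: {
    http_req_duration: ['p(95)<500'],  // 95% of requests under 500 ms
  },
};

export default function () {
  const res = http.post(
    'http://localhost/api/orders',
    JSON.stringify({ productId: '123', quantity: 2 }),
    { headers: { 'Content-Type': 'application/json' } }
  );
  check(res, { 'status is 200': (r) => r.status === 200 });
  sleep(1);
}
```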
Key metrics to monitor:
- Service Health (HTTP 200 rates)
- Response Time (P95, P99)
- Error Rates (4xx, 5xx)
- Kafka Lag per consumer group (see the sketch after this list)
- Container CPU/Memory usage
- API Request Rate per service
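Consumer lag can be computed with the kafkajs admin client. A sketch assuming kafkajs v2 and the group/topic names from the earlier examples:

```js
const { Kafka } = require('kafkajs');

// Report lag per partition for one consumer group on one topic
async function reportLag(groupId, topic) {
  const kafka = new Kafka({
    clientId: 'lag-reporter',
    brokers: (process.env.KAFKA_BROKERS || 'localhost:9092').split(',')
  });
  const admin = kafka.admin();
  await admin.connect();

  const latest = await admin.fetchTopicOffsets(topic);            // end offsets
  const committed = await admin.fetchOffsets({ groupId, topics: [topic] });

  for (const { partitions } of committed) {
    for (const { partition, offset } of partitions) {
      const end = latest.find((p) => p.partition === partition);
      const lag = Number(end.offset) - Number(offset);
      console.log(`partition ${partition}: lag ${lag}`);
    }
  }
  await admin.disconnect();
}

reportLag('notification-group', 'orders').catch(console.error);
```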
Technology stack:
- Frontend: React, TypeScript, Axios
- Backend: Node.js, Express
- Message Queue: Apache Kafka (AWS MSK)
- Containerization: Docker
- Orchestration: AWS ECS/EKS
- Load Balancer: AWS ALB + Nginx
- Monitoring: Prometheus, Grafana, AWS CloudWatch
- CI/CD: GitHub Actions, AWS CodePipeline
- Infrastructure: Terraform, AWS CDK
This architecture provides scalability, fault tolerance, and real-time processing capabilities. Start with basic implementations and gradually add complexity as you become comfortable with each component.