Microservices Architecture from Scratch with Docker
Build a microservices system with Docker from the ground up. Covers service decomposition, inter-service communication, Docker networking, and practical patterns for a real order management system.
Everyone loves talking about microservices. Far fewer people have actually built them from scratch and dealt with the consequences. I have, and I can tell you: the architecture diagrams look elegant, but the reality involves a lot of debugging network issues between containers at 2 AM.
That said, microservices genuinely solve real problems at scale. Let me walk you through building a small but realistic system -- an order management platform with three services -- so you can see both the power and the pain.
What We're Building
An order system with three services:
- User Service -- manages user accounts and authentication
- Product Service -- manages product catalog and inventory
- Order Service -- processes orders, talks to both other services
Project Structure
order-system/
├── docker-compose.yml
├── services/
│ ├── user-service/
│ │ ├── Dockerfile
│ │ ├── package.json
│ │ └── src/
│ │ └── index.js
│ ├── product-service/
│ │ ├── Dockerfile
│ │ ├── package.json
│ │ └── src/
│ │ └── index.js
│ └── order-service/
│ ├── Dockerfile
│ ├── package.json
│ └── src/
│ └── index.js
The User Service
// services/user-service/src/index.js
const express = require('express');
const crypto = require('crypto');

const app = express();
app.use(express.json());

// In-memory store (swap for a real DB in production)
const users = new Map();

// Create a user. Requires `email` and `name`; responds 201 with the stored
// record (including a generated id and ISO-8601 createdAt timestamp).
app.post('/users', (req, res) => {
  const { email, name } = req.body;
  if (!email || !name) {
    return res.status(400).json({ error: 'email and name required' });
  }
  const id = crypto.randomUUID();
  const user = { id, email, name, createdAt: new Date().toISOString() };
  users.set(id, user);
  res.status(201).json(user);
});

// Look up a single user by id; 404 when unknown.
app.get('/users/:id', (req, res) => {
  const user = users.get(req.params.id);
  if (!user) return res.status(404).json({ error: 'User not found' });
  res.json(user);
});

// Liveness probe used by the Docker Compose healthcheck.
app.get('/health', (req, res) => res.json({ status: 'ok', service: 'user' }));

const PORT = process.env.PORT || 3001;
// FIX: the log message was missing its template-literal backticks, which is
// a SyntaxError as originally published.
app.listen(PORT, () => console.log(`User service on port ${PORT}`));
The Product Service
// services/product-service/src/index.js
const express = require('express');
const crypto = require('crypto');

const app = express();
app.use(express.json());

const products = new Map();

// Seed some products
const seedProducts = [
  { name: 'Mechanical Keyboard', price: 149.99, stock: 50 },
  { name: 'USB-C Hub', price: 49.99, stock: 200 },
  { name: '27" Monitor', price: 399.99, stock: 30 },
];
seedProducts.forEach(p => {
  const id = crypto.randomUUID();
  products.set(id, { id, ...p });
});

// Shared guard: quantity must be a positive integer. Without this check a
// negative quantity POSTed to /reserve would INCREASE stock (and /release
// would decrease it) -- a stock-corruption bug reachable by any caller.
function isValidQuantity(quantity) {
  return Number.isInteger(quantity) && quantity > 0;
}

// List the full catalog.
app.get('/products', (req, res) => {
  res.json([...products.values()]);
});

// Look up a single product by id; 404 when unknown.
app.get('/products/:id', (req, res) => {
  const product = products.get(req.params.id);
  if (!product) return res.status(404).json({ error: 'Product not found' });
  res.json(product);
});

// Reserve stock (called by order service)
app.post('/products/:id/reserve', (req, res) => {
  const product = products.get(req.params.id);
  if (!product) return res.status(404).json({ error: 'Product not found' });
  const { quantity } = req.body;
  if (!isValidQuantity(quantity)) {
    return res.status(400).json({ error: 'quantity must be a positive integer' });
  }
  if (product.stock < quantity) {
    return res.status(409).json({ error: 'Insufficient stock', available: product.stock });
  }
  product.stock -= quantity;
  res.json({ reserved: quantity, remainingStock: product.stock });
});

// Release stock (if order fails)
app.post('/products/:id/release', (req, res) => {
  const product = products.get(req.params.id);
  if (!product) return res.status(404).json({ error: 'Product not found' });
  const { quantity } = req.body;
  if (!isValidQuantity(quantity)) {
    return res.status(400).json({ error: 'quantity must be a positive integer' });
  }
  product.stock += quantity;
  res.json({ released: quantity, currentStock: product.stock });
});

// Liveness probe used by the Docker Compose healthcheck.
app.get('/health', (req, res) => res.json({ status: 'ok', service: 'product' }));

const PORT = process.env.PORT || 3002;
// FIX: restored the template-literal backticks missing from the original.
app.listen(PORT, () => console.log(`Product service on port ${PORT}`));
The Order Service
This is where things get interesting. The order service needs to talk to both other services:
// services/order-service/src/index.js
const express = require('express');
const crypto = require('crypto');

const app = express();
app.use(express.json());

const orders = new Map();

// Service URLs -- Docker Compose service names resolve via DNS
const USER_SERVICE = process.env.USER_SERVICE_URL || 'http://user-service:3001';
const PRODUCT_SERVICE = process.env.PRODUCT_SERVICE_URL || 'http://product-service:3002';

// Thin fetch wrapper: JSON in/out; throws on non-2xx, preferring the
// upstream's own error message when it sent one.
// FIX: all the template-literal backticks below were stripped from the
// original listing, and the bare `await response.json()` would raise a
// confusing SyntaxError if an upstream crashed with a non-JSON body.
async function fetchJson(url, options = {}) {
  const response = await fetch(url, {
    ...options,
    headers: { 'Content-Type': 'application/json', ...options.headers },
    body: options.body ? JSON.stringify(options.body) : undefined,
  });
  let data = null;
  try {
    data = await response.json();
  } catch {
    // Non-JSON body (e.g. an HTML error page); handled below.
  }
  if (!response.ok) throw new Error(data?.error || `HTTP ${response.status}`);
  if (data === null) throw new Error(`Invalid JSON response from ${url}`);
  return data;
}

// Place an order: verify the user, reserve stock item by item, and roll
// back already-reserved items if any reservation fails (simple saga).
app.post('/orders', async (req, res) => {
  const { userId, items } = req.body;
  // items: [{ productId, quantity }]
  if (!userId || !items?.length) {
    return res.status(400).json({ error: 'userId and items required' });
  }
  try {
    // 1. Verify user exists
    const user = await fetchJson(`${USER_SERVICE}/users/${userId}`);

    // 2. Reserve stock for each item
    const reserved = [];
    let total = 0;
    for (const item of items) {
      try {
        await fetchJson(`${PRODUCT_SERVICE}/products/${item.productId}/reserve`, {
          method: 'POST',
          body: { quantity: item.quantity },
        });
        const product = await fetchJson(
          `${PRODUCT_SERVICE}/products/${item.productId}`
        );
        total += product.price * item.quantity;
        reserved.push(item);
      } catch (err) {
        // Rollback previously reserved items (compensating transactions).
        for (const r of reserved) {
          await fetchJson(`${PRODUCT_SERVICE}/products/${r.productId}/release`, {
            method: 'POST',
            body: { quantity: r.quantity },
          }).catch(() => {}); // Best-effort rollback
        }
        return res.status(409).json({ error: `Failed to reserve: ${err.message}` });
      }
    }

    // 3. Create order
    const order = {
      id: crypto.randomUUID(),
      userId,
      userName: user.name,
      items,
      total: Math.round(total * 100) / 100, // avoid float-accumulation cents drift
      status: 'confirmed',
      createdAt: new Date().toISOString(),
    };
    orders.set(order.id, order);
    res.status(201).json(order);
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});

// Look up a single order by id; 404 when unknown.
app.get('/orders/:id', (req, res) => {
  const order = orders.get(req.params.id);
  if (!order) return res.status(404).json({ error: 'Order not found' });
  res.json(order);
});

// Liveness probe used by the Docker Compose healthcheck.
app.get('/health', (req, res) => res.json({ status: 'ok', service: 'order' }));

const PORT = process.env.PORT || 3003;
app.listen(PORT, () => console.log(`Order service on port ${PORT}`));
Look at the rollback logic in the order handler. When reserving stock for multiple items and one fails, we need to release the ones we already reserved. This is the saga pattern in its simplest form -- compensating transactions instead of distributed locks.
Dockerfiles
All three services use the same Dockerfile pattern:
# Shared Dockerfile pattern for all three services.
FROM node:20-alpine
WORKDIR /app
# Copy manifests first so the npm install layer is cached when only
# source files change.
COPY package*.json ./
# `--production` is deprecated on npm 9+ (node:20 ships npm 10);
# `--omit=dev` is the supported equivalent.
RUN npm ci --omit=dev
COPY src/ ./src/
# Adjust per service: 3001 (user), 3002 (product), 3003 (order).
EXPOSE 3001
CMD ["node", "src/index.js"]
Adjust the EXPOSE port for each service (3001, 3002, 3003).
Docker Compose
This ties everything together:
# docker-compose.yml
# NOTE: the top-level `version:` key is obsolete under the Compose
# Specification -- Compose v2 ignores it and prints a warning, so it
# is omitted here.
services:
  user-service:
    build: ./services/user-service
    ports:
      - "3001:3001"
    environment:
      - PORT=3001
    networks:
      - backend
    healthcheck:
      # wget is available via busybox in the alpine base image.
      test: ["CMD", "wget", "-qO-", "http://localhost:3001/health"]
      interval: 10s
      timeout: 5s
      retries: 3

  product-service:
    build: ./services/product-service
    ports:
      - "3002:3002"
    environment:
      - PORT=3002
    networks:
      - backend
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3002/health"]
      interval: 10s
      timeout: 5s
      retries: 3

  order-service:
    build: ./services/order-service
    ports:
      - "3003:3003"
    environment:
      - PORT=3003
      # Service names resolve via Docker's built-in DNS on the shared network.
      - USER_SERVICE_URL=http://user-service:3001
      - PRODUCT_SERVICE_URL=http://product-service:3002
    depends_on:
      user-service:
        condition: service_healthy
      product-service:
        condition: service_healthy
    networks:
      - backend

networks:
  backend:
    driver: bridge
The key here is Docker's built-in DNS. Inside the backend network, user-service resolves to the IP of that container. No hardcoded addresses. No service discovery tools. Docker handles it.
The depends_on with service_healthy ensures the order service doesn't start until the others are up and responding to health checks.
Running It
docker compose up --build
Test it:
# Create a user
curl -X POST http://localhost:3001/users \
-H "Content-Type: application/json" \
-d '{"name": "Alice", "email": "alice@example.com"}'
# Returns: { "id": "abc-123", ... }
# List products
curl http://localhost:3002/products
# Returns array of products with IDs
# Place an order
curl -X POST http://localhost:3003/orders \
-H "Content-Type: application/json" \
-d '{"userId": "abc-123", "items": [{"productId": "xyz-456", "quantity": 2}]}'
Patterns You'll Need in Production
Circuit Breaker
When the user service is down, the order service shouldn't keep hammering it. A circuit breaker trips after N failures and stops making requests for a cooldown period:
/**
 * Minimal circuit breaker. After `threshold` consecutive failures the
 * breaker "opens" and rejects calls immediately; once `cooldownMs` has
 * elapsed it lets a single probe call through ("half-open") -- a success
 * closes it again, another failure re-opens it.
 */
class CircuitBreaker {
  constructor(threshold = 5, cooldownMs = 30000) {
    this.failures = 0;
    this.threshold = threshold;
    this.cooldownMs = cooldownMs;
    this.state = 'closed'; // closed = normal, open = blocking
    this.nextAttempt = 0;
  }

  async call(fn) {
    if (this.state === 'open') {
      const stillCoolingDown = Date.now() < this.nextAttempt;
      if (stillCoolingDown) {
        throw new Error('Circuit breaker is open');
      }
      this.state = 'half-open'; // cooldown elapsed: allow one probe
    }

    try {
      const value = await fn();
      // Any success fully resets the breaker.
      this.failures = 0;
      this.state = 'closed';
      return value;
    } catch (err) {
      this.failures += 1;
      if (this.failures >= this.threshold) {
        this.state = 'open';
        this.nextAttempt = Date.now() + this.cooldownMs;
      }
      throw err; // propagate the original failure to the caller
    }
  }
}
const userServiceBreaker = new CircuitBreaker();
// Usage: wrap every cross-service call so repeated failures trip the breaker.
// FIX: restored the template-literal backticks missing from the original.
const user = await userServiceBreaker.call(
  () => fetchJson(`${USER_SERVICE}/users/${userId}`)
);
Centralized Logging
When something breaks across three services, you need correlated logs. Pass a request ID through all service calls:
// Middleware for all services: attach a correlation id to every request,
// reusing the caller's x-request-id header when present and echoing it
// back on the response so clients can log it too.
app.use((req, res, next) => {
  const incomingId = req.headers['x-request-id'];
  req.requestId = incomingId || crypto.randomUUID();
  res.setHeader('x-request-id', req.requestId);
  next();
});
// When calling other services, forward the ID so logs can be correlated.
// FIX: restored the template-literal backticks missing from the original.
await fetchJson(`${USER_SERVICE}/users/${userId}`, {
  headers: { 'x-request-id': req.requestId }
});
Now you can grep for a single request ID across all service logs and trace the entire flow.
When Not to Use Microservices
Honestly? Most projects shouldn't start with microservices. A well-structured monolith is easier to develop, easier to debug, easier to deploy. You can always extract services later when you hit specific scaling bottlenecks.
Microservices make sense when:
- Different parts of the system need to scale independently
- You have multiple teams that need to deploy independently
- Services have genuinely different runtime requirements (one needs GPUs, another just needs a small VM)
If none of those apply, start with a monolith. You can thank me later.