I'm trying to put together a minimal setup for running Temporal with each service in its own container (Docker Compose), based on the my-temporal-dockercompose repo but without the load balancer: just a single container per service.
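Here is the compose file (image tags, credentials, and the timezone come from an .env file; an illustrative one is shown after the compose file):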
```yaml
# Loki Docker plugin for logging from each container service
x-logging: &logging
  logging:
    driver: loki
    options:
      loki-url: "http://host.docker.internal:3100/loki/api/v1/push"
      mode: non-blocking
      max-buffer-size: 4m
      loki-retries: "3"

services:
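  # Elasticsearch, intended as the visibility store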
  elasticsearch:
    container_name: temporal-elasticsearch
    environment:
      - cluster.routing.allocation.disk.threshold_enabled=true
      - cluster.routing.allocation.disk.watermark.low=512mb
      - cluster.routing.allocation.disk.watermark.high=256mb
      - cluster.routing.allocation.disk.watermark.flood_stage=128mb
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
      - xpack.security.enabled=false
    image: elasticsearch:${ELASTICSEARCH_VERSION}
    restart: unless-stopped
    networks:
      - temporal-network
    expose:
      - 9200
    volumes:
      - /var/lib/elasticsearch/data

  # Temporal database
  postgresql:
    container_name: temporal-postgresql
    command: postgres -c 'max_connections=200'
    environment:
      - POSTGRES_PASSWORD=${POSTGRES_PWD}
      - POSTGRES_USER=${POSTGRES_USER}
      - TZ=${TZ_INFO}
    image: postgres:${POSTGRESQL_VERSION}
    restart: unless-stopped
    networks:
      - temporal-network
    ports:
      - "${POSTGRES_DEFAULT_PORT:-5432}:5432"
    volumes:
      - ./postgresql_data:/var/lib/postgresql/data
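
  # Prometheus exporter for PostgreSQL metrics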
  postgres-exporter:
    container_name: postgres-exporter
    image: prometheuscommunity/postgres-exporter:${POSTGRES_EXPORTER_IMG}
    networks:
      - temporal-network
    depends_on:
      - postgresql
    environment:
      - DATA_SOURCE_URI=postgresql:${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}/postgres?sslmode=disable
      - DATA_SOURCE_USER=${POSTGRES_USER}
      - DATA_SOURCE_PASS=${POSTGRES_PWD}
      - PG_EXPORTER_INCLUDE_DATABASES=temporal,temporal_visibility
      - TZ=${TZ_INFO}
    expose:
      - 9187

  # Temporal server services
  # History service (owns workflow state and history shards)
  temporal-history:
    <<: *logging
    container_name: temporal-history
    networks:
      - temporal-network
    depends_on:
      - postgresql
      # - temporal-frontend
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=history
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - PROMETHEUS_ENDPOINT=0.0.0.0:8000
      - NUM_HISTORY_SHARDS=2048
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7234
      - 8000
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7234", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
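
  # Matching service (dispatches tasks on task queues)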
  temporal-matching:
    <<: *logging
    container_name: temporal-matching
    networks:
      - temporal-network
    # depends_on:
    #   - temporal-history
    depends_on:
      temporal-history:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7235", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=matching
      # Default ports for the matching service
      - MATCHING_GRPC_PORT=7235
      - MATCHING_MEMBERSHIP_PORT=6939
      - PROMETHEUS_ENDPOINT=0.0.0.0:8001
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7235
      - 8001
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
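
  # Client-facing frontend: gRPC on 7237, HTTP on 7244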
  temporal-frontend:
    <<: *logging
    container_name: temporal-frontend
    networks:
      - temporal-network
    depends_on:
      temporal-matching:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7237", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=frontend
      - FRONTEND_GRPC_PORT=7237
      - PROMETHEUS_ENDPOINT=0.0.0.0:8002
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - FRONTEND_HTTP_PORT=7244
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7237
      - 7244
      - 8002
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
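
  # Internal frontend, used by other server roles (e.g. the worker) when USE_INTERNAL_FRONTEND=true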
  temporal-internal-frontend:
    <<: *logging
    container_name: temporal-internal-frontend
    networks:
      - temporal-network
    depends_on:
      temporal-matching:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7231", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=internal-frontend
      - PROMETHEUS_ENDPOINT=0.0.0.0:8007
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7231
      - 8007
    ports:
      - published: 7231
        target: 7231
      - published: 8007
        target: 8007
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
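
  # Worker service (runs Temporal's internal system workflows)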
  temporal-worker:
    <<: *logging
    container_name: temporal-worker
    networks:
      - temporal-network
    depends_on:
      temporal-frontend:
        condition: service_healthy
      temporal-internal-frontend:
        condition: service_healthy
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=worker
      - PROMETHEUS_ENDPOINT=0.0.0.0:8003
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7232
      - 8003
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
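
  # Admin tools container; the mounted setup.sh is its entrypoint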
  temporal-admin-tools:
    <<: *logging
    container_name: temporal-admin-tools
    networks:
      - temporal-network
    depends_on:
      - temporal-frontend
      - postgresql
    environment:
      - TEMPORAL_CLI_ADDRESS=temporal-frontend:7237
      - TEMPORAL_ADDRESS=temporal-frontend:7237
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - TEMPORAL_HOME=/etc/temporal
      - PUBLIC_FRONTEND_ADDRESS=temporal-frontend:7237
      - TZ=${TZ_INFO}
    image: temporalio/admin-tools:${TEMPORAL_ADMINTOOLS_IMG}
    volumes:
      - "./script/setup.sh:/etc/temporal/setup.sh"
    entrypoint:
      - /etc/temporal/setup.sh
    restart: unless-stopped
    stdin_open: true
    tty: true
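
  # Temporal Web UI, published on host port 8080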
  temporal-ui:
    container_name: temporal-ui
    networks:
      - temporal-network
    depends_on:
      - temporal-frontend
    environment:
      - TEMPORAL_ADDRESS=temporal-frontend:7237
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
      - TEMPORAL_UI_PORT=8080
      - TEMPORAL_SHOW_TEMPORAL_SYSTEM_NAMESPACE=true
      - TEMPORAL_ACTIVITY_COMMANDS_DISABLED=false
      - TZ=${TZ_INFO}
    image: temporalio/ui:${TEMPORAL_UI_IMG}
    ports:
      - "8080:8080"

networks:
  temporal-network:
    driver: bridge
    name: temporal-network
```
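For completeness, the ${...} placeholders come from an .env file next to the compose file. The values below are illustrative only, not my exact pins:

```env
# Illustrative .env; substitute the versions and credentials you actually use
ELASTICSEARCH_VERSION=7.17.27
POSTGRESQL_VERSION=16
POSTGRES_USER=temporal
POSTGRES_PWD=temporal
POSTGRES_DEFAULT_PORT=5432
POSTGRES_DEFAULT_INTERNAL_PORT=5432
POSTGRES_EXPORTER_IMG=v0.15.0
TEMPORAL_SERVER_IMG=1.25.2
TEMPORAL_ADMINTOOLS_IMG=1.25.2
TEMPORAL_UI_IMG=2.31.2
TZ_INFO=America/Sao_Paulo
```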
I've tried adding health checks, but the temporal-history service keeps logging this error:

```
temporal-history | {"level":"error","ts":"2025-10-06T16:40:13.266-0300","msg":"long poll to refresh Nexus endpoints returned error","error":"Not enough hosts to serve the request"
```
I understand this is related to service discovery, but how can I solve it without using a load balancer? Even the repository that does use a load balancer just recommends retrying until the problem goes away when it occurs, but in my case it happens every time.

Thanks