Problems with a basic local deployment of services

I was trying to create a minimal setup for using Temporal with each service in its own container (Docker Compose), based on the repo my-temporal-dockercompose but without the load balancer: just a single container for each service.
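
All of the ${...} references in the compose file come from an .env file alongside it, something like this (values are illustrative placeholders; pin your own):

# .env (example values only)
ELASTICSEARCH_VERSION=7.16.2
POSTGRESQL_VERSION=13
POSTGRES_USER=temporal
POSTGRES_PWD=temporal
POSTGRES_DEFAULT_PORT=5432
POSTGRES_DEFAULT_INTERNAL_PORT=5432
POSTGRES_DEFAULT_EXTERNAL_PORT=5432
POSTGRES_EXPORTER_IMG=v0.15.0
TEMPORAL_SERVER_IMG=1.25.1
TEMPORAL_ADMINTOOLS_IMG=1.25.1
TEMPORAL_UI_IMG=2.31.2
TZ_INFO=America/Sao_Paulo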

# Loki Docker plugin for logging from each container service
x-logging: &logging
  logging:
    driver: loki
    options:
      loki-url: "http://host.docker.internal:3100/loki/api/v1/push"
      mode: non-blocking
      max-buffer-size: 4m
      loki-retries: "3"
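
# NOTE: "loki" is a Docker logging plugin, not an image; it has to be installed
# on the host before "docker compose up" (install command from the Grafana docs):
#   docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions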

services:
  elasticsearch:
    container_name: temporal-elasticsearch
    environment:
      - cluster.routing.allocation.disk.threshold_enabled=true
      - cluster.routing.allocation.disk.watermark.low=512mb
      - cluster.routing.allocation.disk.watermark.high=256mb
      - cluster.routing.allocation.disk.watermark.flood_stage=128mb
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
      - xpack.security.enabled=false
    image: elasticsearch:${ELASTICSEARCH_VERSION}
    restart: unless-stopped
    networks:
      - temporal-network
    expose:
      - 9200
    volumes:
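      # anonymous volume: keeps the ES data dir off the container's writable layer ("docker compose down -v" removes it)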
      - /var/lib/elasticsearch/data
  # Temporal database
  postgresql:
    container_name: temporal-postgresql
    command: postgres -c 'max_connections=200'
    environment:
      - POSTGRES_PASSWORD=${POSTGRES_PWD}
      - POSTGRES_USER=${POSTGRES_USER}
      - TZ=${TZ_INFO}
    image: postgres:${POSTGRESQL_VERSION}
    restart: unless-stopped
    networks:
      - temporal-network
    ports:
      - "${POSTGRES_DEFAULT_PORT:-5432}:5432"
    volumes:
      - ./postgresql_data:/var/lib/postgresql/data
  postgres-exporter:
    container_name: postgres-exporter
    image: prometheuscommunity/postgres-exporter:${POSTGRES_EXPORTER_IMG}
    networks:
      - temporal-network
    depends_on:
      - postgresql
    environment:
      - DATA_SOURCE_URI=postgresql:${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}/postgres?sslmode=disable
      - DATA_SOURCE_USER=${POSTGRES_USER}
      - DATA_SOURCE_PASS=${POSTGRES_PWD}
      - PG_EXPORTER_INCLUDE_DATABASES=temporal,temporal_visibility
      - TZ=${TZ_INFO}
    expose:
      - 9187

  # Temporal Services
  # Temporal history service (owns workflow state)
  temporal-history:
    <<: *logging
    container_name: temporal-history
    networks:
      - temporal-network
    depends_on:
      - postgresql
#      - temporal-frontend
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=history
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - PROMETHEUS_ENDPOINT=0.0.0.0:8000
      - NUM_HISTORY_SHARDS=2048
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7234
      - 8000
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7234", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-matching:
    <<: *logging
    container_name: temporal-matching
    networks:
      - temporal-network
#    depends_on:
#      - temporal-history
    depends_on:
      temporal-history:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7235", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=matching
      # Default values ports for matching service
      - MATCHING_GRPC_PORT=7235
      - MATCHING_MEMBERSHIP_PORT=6939
      - PROMETHEUS_ENDPOINT=0.0.0.0:8001
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7235
      - 8001
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-frontend:
    <<: *logging
    container_name: temporal-frontend
    networks:
      - temporal-network
    depends_on:
      temporal-matching:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7237", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=frontend
      - FRONTEND_GRPC_PORT=7237
      - PROMETHEUS_ENDPOINT=0.0.0.0:8002
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - FRONTEND_HTTP_PORT=7244
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7237
      - 7244
      - 8002
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-internal-frontend:
    <<: *logging
    container_name: temporal-internal-frontend
    networks:
      - temporal-network
    depends_on:
      temporal-matching:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "tctl", "--address", "localhost:7231", "cluster", "health"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=internal-frontend
      - PROMETHEUS_ENDPOINT=0.0.0.0:8007
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7231
      - 8007
    ports:
      - published: 7231
        target: 7231
      - published: 8007
        target: 8007
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-worker:
    <<: *logging
    container_name: temporal-worker
    networks:
      - temporal-network
    depends_on:
      temporal-frontend:
        condition: service_healthy
      temporal-internal-frontend:
        condition: service_healthy
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=worker
      - PROMETHEUS_ENDPOINT=0.0.0.0:8003
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
      - TEMPORAL_SEEDS=temporal-history,temporal-matching
    image: temporalio/server:${TEMPORAL_SERVER_IMG}
    expose:
      - 7232
      - 8003
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-admin-tools:
    <<: *logging
    container_name: temporal-admin-tools
    networks:
      - temporal-network
    depends_on:
      - temporal-frontend
      - postgresql
    environment:
      - TEMPORAL_CLI_ADDRESS=temporal-frontend:7237
      - TEMPORAL_ADDRESS=temporal-frontend:7237
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - TEMPORAL_HOME=/etc/temporal
      - PUBLIC_FRONTEND_ADDRESS=temporal-frontend:7237
      - TZ=${TZ_INFO}
    image: temporalio/admin-tools:${TEMPORAL_ADMINTOOLS_IMG}
    volumes:
      - "./script/setup.sh:/etc/temporal/setup.sh"
    entrypoint:
      - /etc/temporal/setup.sh
    restart: unless-stopped
    stdin_open: true
    tty: true
  temporal-ui:
    container_name: temporal-ui
    networks:
      - temporal-network
    depends_on:
      - temporal-frontend
    environment:
      - TEMPORAL_ADDRESS=temporal-frontend:7237
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
      - TEMPORAL_UI_PORT=8080
      - TEMPORAL_SHOW_TEMPORAL_SYSTEM_NAMESPACE=true
      - TEMPORAL_ACTIVITY_COMMANDS_DISABLED=false
      - TZ=${TZ_INFO}
    image: temporalio/ui:${TEMPORAL_UI_IMG}
    ports:
      - "8080:8080"

networks:
  temporal-network:
    driver: bridge
    name: temporal-network
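
(Aside: the PROMETHEUS_ENDPOINT values above only expose metrics ports; I scrape them from a Prometheus container on the same temporal-network, which is not shown in this compose. The scrape config is roughly this sketch; job names are my own:)

# prometheus.yml (sketch)
scrape_configs:
  - job_name: temporal
    static_configs:
      - targets:
          - temporal-history:8000
          - temporal-matching:8001
          - temporal-frontend:8002
          - temporal-internal-frontend:8007
          - temporal-worker:8003
  - job_name: postgres
    static_configs:
      - targets:
          - postgres-exporter:9187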

I’ve tried using health checks, but the temporal-history service keeps logging this error:

temporal-history | {"level":"error","ts":"2025-10-06T16:40:13.266-0300","msg":"long poll to refresh Nexus endpoints returned error","error":"Not enough hosts to serve the request"

I understand that this is related to service discovery, but how can I solve it without using a load balancer?

Even the repository that uses a load balancer recommends simply retrying until the problem resolves, if it happens at all. In my case, it always happens.

Thanks

Which containers do you see start up? You could check with:

docker compose ps

For:

test: ["CMD", "tctl", "--address", "localhost:7234", "cluster", "health"]

The cluster health command only checks the frontend service; it cannot check history/matching.

you could use grpc-health-probe, for example:

grpc-health-probe -addr=<host:matching_grpc_port> -service=temporal.api.workflowservice.v1.MatchingService
grpc-health-probe -addr=<host:history_grpc_port> -service=temporal.api.workflowservice.v1.HistoryService
grpc-health-probe -addr=<host:frontend_grpc_port> -service=temporal.api.workflowservice.v1.WorkflowService

Sorry for taking so long to check your suggestions (vacation :sweat_smile: ). I checked, and I needed to build a custom Docker image to include grpc-health-probe, something like:

# Default "error" is a deliberately invalid tag, so the build fails unless
# TEMPORAL_SERVER_IMG is passed in as a build arg
ARG TEMPORAL_SERVER_IMG=error

FROM temporalio/server:${TEMPORAL_SERVER_IMG}

# Copy the grpc-health-probe binary out of its release image into the server image
COPY --from=ghcr.io/grpc-ecosystem/grpc-health-probe:v0.4.38 /ko-app/grpc-health-probe /bin/grpc_health_probe

# Keep the stock server entrypoint
ENTRYPOINT ["/etc/temporal/entrypoint.sh"]
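
To build the image outside of compose (image name and tag here are placeholders; the compose file below passes the same TEMPORAL_SERVER_IMG via its build: sections instead):

docker build --build-arg TEMPORAL_SERVER_IMG=1.25.1 -t my-temporal-server .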

My final compose file looks something like this:

# Loki Docker plugin for logging from each container service
x-logging: &logging
  logging:
    driver: loki
    options:
      loki-url: "http://host.docker.internal:3100/loki/api/v1/push"
      mode: non-blocking
      max-buffer-size: 4m
      loki-retries: "3"

services:
  # Temporal visibility
  elasticsearch:
    container_name: temporal-elasticsearch
    environment:
      - cluster.routing.allocation.disk.threshold_enabled=true
      - cluster.routing.allocation.disk.watermark.low=512mb
      - cluster.routing.allocation.disk.watermark.high=256mb
      - cluster.routing.allocation.disk.watermark.flood_stage=128mb
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
      - xpack.security.enabled=false
      - TZ=${TZ_INFO}
    image: elasticsearch:${ELASTICSEARCH_VERSION}
    restart: unless-stopped
    networks:
      - temporal-network
    ports:
      - published: 9200
        target: 9200
    volumes:
      - /var/lib/elasticsearch/data
  # Temporal database
  postgresql:
    container_name: temporal-postgresql
    command: postgres -c 'max_connections=200'
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PWD}
      - TZ=${TZ_INFO}
    image: postgres:${POSTGRESQL_VERSION}
    restart: unless-stopped
    networks:
      - temporal-network
    ports:
      - "${POSTGRES_DEFAULT_EXTERNAL_PORT:-5432}:5432"
  postgres-exporter:
    container_name: postgres-exporter
    image: prometheuscommunity/postgres-exporter:${POSTGRES_EXPORTER_IMG}
    networks:
      - temporal-network
    depends_on:
      - postgresql
    environment:
      - DATA_SOURCE_URI=postgresql:${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}/postgres?sslmode=disable
      - DATA_SOURCE_USER=${POSTGRES_USER}
      - DATA_SOURCE_PASS=${POSTGRES_PWD}
      - PG_EXPORTER_INCLUDE_DATABASES=temporal,temporal_visibility
      - TZ=${TZ_INFO}
    ports:
      - published: 9187
        target: 9187
  # Temporal Services
  temporal-history:
    <<: *logging
    container_name: temporal-history
    networks:
      - temporal-network
    depends_on:
      - postgresql
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - TEMPORAL_HISTORY_NAMESPACEDEFAULT_ARCHIVAL_FILESTORE=enabled
      - TEMPORAL_VISIBILITY_NAMESPACEDEFAULT_ARCHIVAL_FILESTORE=enabled
      - SERVICES=history
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - PROMETHEUS_ENDPOINT=0.0.0.0:8000
      - NUM_HISTORY_SHARDS=2048
      - TZ=${TZ_INFO}
    build:
      context: .
      args:
        - TEMPORAL_SERVER_IMG=${TEMPORAL_SERVER_IMG}
    ports:
      - published: 7234
        target: 7234
      - published: 8000
        target: 8000
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "grpc_health_probe", "-addr=temporal-history:7234", "-service=temporal.api.workflowservice.v1.HistoryService"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 5s
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
  temporal-matching:
    <<: *logging
    container_name: temporal-matching
    networks:
      - temporal-network
    depends_on:
      - temporal-history
    healthcheck:
      test: ["CMD", "grpc_health_probe", "-addr=temporal-matching:7235", "-service=temporal.api.workflowservice.v1.MatchingService"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 5s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=matching
      - MATCHING_GRPC_PORT=7235
      - MATCHING_MEMBERSHIP_PORT=6939
      - PROMETHEUS_ENDPOINT=0.0.0.0:8001
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
    build:
      context: .
      args:
        - TEMPORAL_SERVER_IMG=${TEMPORAL_SERVER_IMG}
    ports:
      - published: 7235
        target: 7235
      - published: 8001
        target: 8001
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-frontend:
    <<: *logging
    container_name: temporal-frontend
    networks:
      - temporal-network
    depends_on:
      - temporal-matching
    healthcheck:
      test: ["CMD", "grpc_health_probe", "-addr=temporal-frontend:7233", "-service=temporal.api.workflowservice.v1.WorkflowService"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 10s
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=frontend
      - FRONTEND_GRPC_PORT=7233
      - PROMETHEUS_ENDPOINT=0.0.0.0:8002
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - FRONTEND_HTTP_PORT=7244
      - TZ=${TZ_INFO}
    build:
      context: .
      args:
        - TEMPORAL_SERVER_IMG=${TEMPORAL_SERVER_IMG}
    ports:
      - published: 7233
        target: 7233
      - published: 7244
        target: 7244
      - published: 8002
        target: 8002
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-internal-frontend:
    <<: *logging
    container_name: temporal-internal-frontend
    networks:
      - temporal-network
    depends_on:
      - temporal-matching
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=internal-frontend
      - PROMETHEUS_ENDPOINT=0.0.0.0:8007
      - NUM_HISTORY_SHARDS=2048
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
    build:
      context: .
      args:
        - TEMPORAL_SERVER_IMG=${TEMPORAL_SERVER_IMG}
    ports:
      - published: 7231
        target: 7231
      - published: 8007
        target: 8007
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-worker:
    <<: *logging
    container_name: temporal-worker
    networks:
      - temporal-network
    depends_on:
      - temporal-frontend
    environment:
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
      - SERVICES=worker
      - PROMETHEUS_ENDPOINT=0.0.0.0:8003
      - USE_INTERNAL_FRONTEND=true
      - LOG_LEVEL=warn
      - TZ=${TZ_INFO}
    build:
      context: .
      args:
        - TEMPORAL_SERVER_IMG=${TEMPORAL_SERVER_IMG}
    ports:
      - published: 7232
        target: 7232
      - published: 8003
        target: 8003
    restart: unless-stopped
    volumes:
      - ./dynamicconfig:/etc/temporal/config/dynamicconfig
      - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml
  temporal-admin-tools:
    <<: *logging
    container_name: temporal-admin-tools
    networks:
      - temporal-network
    depends_on:
      - postgresql
    environment:
      - TEMPORAL_CLI_ADDRESS=temporal-frontend:7233
      - TEMPORAL_ADDRESS=temporal-frontend:7233
      - DB=postgres12
      - DB_PORT=${POSTGRES_DEFAULT_INTERNAL_PORT:-5432}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PWD=${POSTGRES_PWD}
      - POSTGRES_SEEDS=postgresql
      - TEMPORAL_HOME=/etc/temporal
      - PUBLIC_FRONTEND_ADDRESS=temporal-frontend:7233
      - TZ=${TZ_INFO}
    image: temporalio/admin-tools:${TEMPORAL_ADMINTOOLS_IMG}
    volumes:
      - "./script/setup.sh:/etc/temporal/setup.sh"
    entrypoint:
      - /etc/temporal/setup.sh
    restart: unless-stopped
    stdin_open: true
    tty: true
  temporal-ui:
    container_name: temporal-ui
    networks:
      - temporal-network
    depends_on:
      - temporal-admin-tools
    environment:
      - TEMPORAL_ADDRESS=temporal-frontend:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
      - TEMPORAL_CSRF_COOKIE_INSECURE=true
      - TEMPORAL_UI_PORT=8080
      - TEMPORAL_SHOW_TEMPORAL_SYSTEM_NAMESPACE=true
      - TEMPORAL_ACTIVITY_COMMANDS_DISABLED=false
      - TZ=${TZ_INFO}
    image: temporalio/ui:${TEMPORAL_UI_IMG}
    ports:
      - "8080:8080"

networks:
  temporal-network:
    driver: bridge
    name: temporal-network
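
To bring everything up and watch it settle, the usual commands:

docker compose up -d --build
docker compose ps
docker compose logs -f temporal-history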

I ran into some problems trying to enforce dependencies between my services based on their health state (depends_on with condition: service_healthy), but it didn’t work as expected. The only thing I was able to verify was the overall health a few minutes after all the services were finally running (for example, the history service restarted a few times until it found the hosts it needed to serve requests).

My new questions are:

  • Does this approach seem OK? Can I start using it as a base for my production projects based on Docker/Compose?

  • Is starting with one instance of each service a good practice? Or is there already a general recommendation for the number of instances of each service (e.g., matching, history, frontend, internal-worker, internal-frontend, …)?

I still plan to improve this approach, but any insights and recommendations are welcome.

Thanks! :grin: