Unable to run a Docker container for each service: matching, frontend, history, worker

I am trying to deploy each Temporal service as a separate Docker container. All containers start fine, but when I try to reach http://localhost:8088 I get the error below:

 | {"level":"info","ts":"2022-04-24T11:53:39.647Z","msg":"bootstrap hosts fetched","service":"frontend","bootstrap-hostports":"0.0.0.0:6934,0.0.0.0:6935,0.0.0.0:6933","logging-call-at":"rpMonitor.go:271"}
temporal-web            | [2022-04-24T11:53:41.282Z] Auth is disabled in config
temporal-web            | [2022-04-24T11:53:41.860Z] will use insecure connection with Temporal server...
temporal-web            | temporal-web ssl is not enabled
temporal-web            | temporal-web up and listening on port 8088
temporal-web            | [2022-04-24T11:53:54.846Z] OperationalError: 14 UNAVAILABLE: DNS resolution failed
temporal-web            |     at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |     at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |     at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |     at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |     at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |   cause: Error: 14 UNAVAILABLE: DNS resolution failed
temporal-web            |       at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |       at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |       at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |       at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |       at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |     code: 14,
temporal-web            |     metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |     details: 'DNS resolution failed'
temporal-web            |   },
temporal-web            |   isOperational: true,
temporal-web            |   code: 14,
temporal-web            |   metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |   details: 'DNS resolution failed'
temporal-web            | }
temporal-web            | [2022-04-24T11:53:54.951Z] OperationalError: 14 UNAVAILABLE: DNS resolution failed
temporal-web            |     at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |     at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |     at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |     at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |     at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |   cause: Error: 14 UNAVAILABLE: DNS resolution failed
temporal-web            |       at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |       at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |       at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |       at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |       at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |     code: 14,
temporal-web            |     metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |     details: 'DNS resolution failed'
temporal-web            |   },
temporal-web            |   isOperational: true,
temporal-web            |   code: 14,
temporal-web            |   metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |   details: 'DNS resolution failed'
temporal-web            | }
temporal-frontend       | {"level":"error","ts":"2022-04-24T21:10:43.913Z","msg":"unable to bootstrap ringpop. retrying","service":"frontend","error":"join duration of 38.7320493s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-worker         | {"level":"error","ts":"2022-04-24T21:10:47.424Z","msg":"unable to bootstrap ringpop. retrying","service":"worker","error":"join duration of 41.3403459s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-matching       | {"level":"error","ts":"2022-04-24T21:10:51.423Z","msg":"unable to bootstrap ringpop. retrying","service":"matching","error":"join duration of 47.0306736s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-frontend       | {"level":"info","ts":"2022-04-24T21:10:53.923Z","msg":"bootstrap hosts fetched","service":"frontend","bootstrap-hostports":"0.0.0.0:6933,0.0.0.0:6939,0.0.0.0:6934,0.0.0.0:6935","logging-call-at":"rpMonitor.go:271"}
temporal-worker         | {"level":"info","ts":"2022-04-24T21:10:57.431Z","msg":"bootstrap hosts fetched","service":"worker","bootstrap-hostports":"0.0.0.0:6933,0.0.0.0:6939,0.0.0.0:6934,0.0.0.0:6935","logging-call-at":"rpMonitor.go:271"}
temporal-matching       | {"level":"info","ts":"2022-04-24T21:11:01.392Z","msg":"bootstrap hosts fetched","service":"matching","bootstrap-hostports":"0.0.0.0:6934,0.0.0.0:6935,0.0.0.0:6933,0.0.0.0:6939","logging-call-at":"rpMonitor.go:271"}

docker-compose.yaml

version: "3.5"
services:
  temporal-history:
    container_name: temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=history
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=0.0.0.0
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7234:7234
    volumes:
      - ./config/history:/etc/temporal/config

  temporal-matching:
    container_name: temporal-matching
    depends_on:
      - temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=matching
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=0.0.0.0
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7235:7235
    volumes:
      - ./config/matching:/etc/temporal/config

  temporal-frontend:
    container_name: temporal-frontend
    depends_on:
      - temporal-matching
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=frontend
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=0.0.0.0
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7233:7233
    volumes:
      - ./config/frontend:/etc/temporal/config

  temporal-worker:
    container_name: temporal-worker
    depends_on:
      - temporal-frontend
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=worker
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=0.0.0.0
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7239:7239
    volumes:
      - ./config/worker:/etc/temporal/config
  
  temporal-admin-tools:
    container_name: temporal-admin-tools
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_CLI_ADDRESS=temporal:7233
    image: temporalio/admin-tools:${TEMPORAL_VERSION}
    networks:
      - temporal-network
    stdin_open: true
    tty: true
 
  temporal-ui:
    container_name: temporal-ui
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_ADDRESS=temporal:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
    image: temporalio/ui:${TEMPORAL_UI_VERSION}
    networks:
      - temporal-network
    ports:
      - 8080:8080

  temporal-web:
    container_name: temporal-web
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_GRPC_ENDPOINT=temporal:7233
      - TEMPORAL_PERMIT_WRITE_API=true
    image: temporalio/web:${TEMPORAL_WEB_VERSION}
    networks:
      - temporal-network
    ports:
      - 8088:8088
networks:
  temporal-network:
    driver: bridge
    name: temporal-network

Take a look at the config template (specifically the global.membership section).
Also, if you set your broadcast address to 127.0.0.1, I believe that in each of your services you can just define
bindOnLocalHost: true
and don't have to set bindOnIP. Hope this helps.
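
For reference, a minimal sketch of what that combination would look like in the server config, using the gRPC and membership ports that appear elsewhere in this thread (an illustration only, not a verified fix):

global:
    membership:
        broadcastAddress: "127.0.0.1"

services:
    frontend:
        rpc:
            grpcPort: 7233
            membershipPort: 6933
            bindOnLocalHost: true
    # ...same pattern for matching, history, and worker with their own ports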

Hi Tihomir,

I tried the above settings but am still getting the same error:

temporal-history        | {"level":"info","ts":"2022-04-26T12:51:04.699Z","msg":"Timer queue processor started.","shard-id":2,"address":"127.0.0.1:7234","component":"timer-queue-processor","cluster-name":"active","component":"timer-queue-processor","logging-call-at":"timerQueueProcessorBase.go:150"}
temporal-history        | {"level":"info","ts":"2022-04-26T12:51:04.699Z","msg":"none","shard-id":2,"address":"127.0.0.1:7234","component":"history-engine","lifecycle":"Started","logging-call-at":"historyEngine.go:261"}
temporal-history        | {"level":"info","ts":"2022-04-26T12:51:04.699Z","msg":"none","shard-id":2,"address":"127.0.0.1:7234","lifecycle":"Started","component":"shard-engine","logging-call-at":"context_impl.go:1342"}
temporal-history        | {"level":"info","ts":"2022-04-26T12:51:04.843Z","msg":"none","component":"shard-controller","address":"127.0.0.1:7234","lifecycle":"Started","logging-call-at":"controller_impl.go:118"}
temporal-history        | {"level":"info","ts":"2022-04-26T12:51:04.843Z","msg":"Starting to serve on history listener","service":"history","logging-call-at":"service.go:107"}
temporal-web            | [2022-04-26T12:51:07.948Z] Auth is disabled in config
temporal-web            | [2022-04-26T12:51:08.794Z] will use insecure connection with Temporal server...
temporal-web            | temporal-web ssl is not enabled
temporal-web            | temporal-web up and listening on port 8088
temporal-web            | [2022-04-26T12:51:32.690Z] OperationalError: 14 UNAVAILABLE: failed to connect to all addresses
temporal-web            |     at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |     at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |     at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |     at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |     at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |   cause: Error: 14 UNAVAILABLE: failed to connect to all addresses
temporal-web            |       at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |       at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |       at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |       at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |       at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |     code: 14,
temporal-web            |     metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |     details: 'failed to connect to all addresses'
temporal-web            |   },
temporal-web            |   isOperational: true,
temporal-web            |   code: 14,
temporal-web            |   metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |   details: 'failed to connect to all addresses'
temporal-web            | }
temporal-web            | [2022-04-26T12:51:32.799Z] OperationalError: 14 UNAVAILABLE: failed to connect to all addresses
temporal-web            |     at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |     at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |     at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |     at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |     at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |   cause: Error: 14 UNAVAILABLE: failed to connect to all addresses
temporal-web            |       at Object.exports.createStatusError (/usr/app/node_modules/grpc/src/common.js:91:15)
temporal-web            |       at Object.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:1209:28)
temporal-web            |       at InterceptingListener._callNext (/usr/app/node_modules/grpc/src/client_interceptors.js:568:42)
temporal-web            |       at InterceptingListener.onReceiveStatus (/usr/app/node_modules/grpc/src/client_interceptors.js:618:8)
temporal-web            |       at callback (/usr/app/node_modules/grpc/src/client_interceptors.js:847:24) {
temporal-web            |     code: 14,
temporal-web            |     metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |     details: 'failed to connect to all addresses'
temporal-web            |   },
temporal-web            |   isOperational: true,
temporal-web            |   code: 14,
temporal-web            |   metadata: Metadata { _internal_repr: {}, flags: 0 },
temporal-web            |   details: 'failed to connect to all addresses'
temporal-web            | }
temporal-matching       | {"level":"error","ts":"2022-04-26T12:51:38.914Z","msg":"unable to bootstrap ringpop. retrying","service":"matching","error":"join duration of 33.939739504s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-frontend       | {"level":"error","ts":"2022-04-26T12:51:46.093Z","msg":"unable to bootstrap ringpop. retrying","service":"frontend","error":"join duration of 40.388326974s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-matching       | {"level":"info","ts":"2022-04-26T12:51:48.912Z","msg":"bootstrap hosts fetched","service":"matching","bootstrap-hostports":"127.0.0.1:6934,127.0.0.1:6935,127.0.0.1:6933,127.0.0.1:6939","logging-call-at":"rpMonitor.go:271"}
temporal-worker         | {"level":"error","ts":"2022-04-26T12:51:49.644Z","msg":"unable to bootstrap ringpop. retrying","service":"worker","error":"join duration of 42.929887435s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-frontend       | {"level":"info","ts":"2022-04-26T12:51:56.102Z","msg":"bootstrap hosts fetched","service":"frontend","bootstrap-hostports":"127.0.0.1:6934,127.0.0.1:6935,127.0.0.1:6933,127.0.0.1:6939","logging-call-at":"rpMonitor.go:271"}
temporal-worker         | {"level":"info","ts":"2022-04-26T12:51:59.651Z","msg":"bootstrap hosts fetched","service":"worker","bootstrap-hostports":"127.0.0.1:6934,127.0.0.1:6935,127.0.0.1:6933,127.0.0.1:6939","logging-call-at":"rpMonitor.go:271"}
temporal-matching       | {"level":"error","ts":"2022-04-26T12:52:29.388Z","msg":"unable to bootstrap ringpop. retrying","service":"matching","error":"join duration of 40.498602512s exceeded max 30s","logging-call-at":"ringpop.go:114","stacktrace":"go.temporal.io/server/common/log.(*zapLogger).Error\n\t/home/builder/temporal/common/log/zap_logger.go:142\ngo.temporal.io/server/common/membership.(*RingPop).bootstrap\n\t/home/builder/temporal/common/membership/ringpop.go:114\ngo.temporal.io/server/common/membership.(*RingPop).Start\n\t/home/builder/temporal/common/membership/ringpop.go:83\ngo.temporal.io/server/common/membership.(*ringpopMonitor).Start\n\t/home/builder/temporal/common/membership/rpMonitor.go:121\ngo.temporal.io/server/common/membership.MonitorLifetimeHooks.func1\n\t/home/builder/temporal/common/membership/fx.go:44\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).runStartHook\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:120\ngo.uber.org/fx/internal/lifecycle.(*Lifecycle).Start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/internal/lifecycle/lifecycle.go:85\ngo.uber.org/fx.(*App).start\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:749\ngo.uber.org/fx.withTimeout.func1\n\t/go/pkg/mod/go.uber.org/fx@v1.16.0/app.go:989"}
temporal-matching       | {"level":"info","ts":"2022-04-26T12:52:39.371Z","msg":"bootstrap hosts fetched","service":"matching","bootstrap-hostports":"127.0.0.1:6934,127.0.0.1:6935,127.0.0.1:6933,127.0.0.1:6939","logging-call-at":"rpMonitor.go:271"}

Updated docker-compose.yaml

version: "3.5"
services:
  temporal-history:
    container_name: temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=history
      # - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=127.0.0.1
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7234:7234
    volumes:
      - ./config:/etc/temporal/config

  temporal-matching:
    container_name: temporal-matching
    depends_on:
      - temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=matching
      # - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=127.0.0.1
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7235:7235
    volumes:
      - ./config:/etc/temporal/config

  temporal-frontend:
    container_name: temporal-frontend
    depends_on:
      - temporal-matching
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=frontend
      # - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=127.0.0.1
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7233:7233
    volumes:
      - ./config:/etc/temporal/config

  temporal-worker:
    container_name: temporal-worker
    depends_on:
      - temporal-frontend
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=worker
      # - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=127.0.0.1
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7239:7239
    volumes:
      - ./config:/etc/temporal/config
  
  temporal-admin-tools:
    container_name: temporal-admin-tools
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_CLI_ADDRESS=127.0.0.1:7233
    image: temporalio/admin-tools:${TEMPORAL_VERSION}
    networks:
      - temporal-network
    stdin_open: true
    tty: true
 
  temporal-ui:
    container_name: temporal-ui
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_ADDRESS=127.0.0.1:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
    image: temporalio/ui:${TEMPORAL_UI_VERSION}
    networks:
      - temporal-network
    ports:
      - 8080:8080

  temporal-web:
    container_name: temporal-web
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_GRPC_ENDPOINT=127.0.0.1:7233
      - TEMPORAL_PERMIT_WRITE_API=true
    image: temporalio/web:${TEMPORAL_WEB_VERSION}
    networks:
      - temporal-network
    ports:
      - 8088:8088
networks:
  temporal-network:
    driver: bridge
    name: temporal-network

docker.yaml from the worker container (it is the same in each container):

log:
    stdout: true
    level: info

persistence:
    numHistoryShards: 4
    defaultStore: default
    visibilityStore: visibility
    datastores:
        default:
            sql:
                pluginName: "postgres"
                databaseName: "temporal"
                connectAddr: "host.docker.internal:5432"
                connectProtocol: "tcp"
                user: "postgres"
                password: ""
                maxConns: 20
                maxIdleConns: 20
                maxConnLifetime: 1h
                tls:
                    enabled: false
                    caFile: 
                    certFile: 
                    keyFile: 
                    enableHostVerification: false
                    serverName: 
        visibility:
            sql:
                pluginName: "postgres"
                databaseName: "temporal_visibility"
                connectAddr: "host.docker.internal:5432"
                connectProtocol: "tcp"
                user: "postgres"
                password: ""
                maxConns: 10
                maxIdleConns: 10
                maxConnLifetime: 1h
                tls:
                    enabled: false
                    caFile: 
                    certFile: 
                    keyFile: 
                    enableHostVerification: false
                    serverName: 

global:
    membership:
        maxJoinDuration: 30s
        broadcastAddress: "127.0.0.1"
    pprof:
        port: 0
    tls:
        refreshInterval: 0s
        expirationChecks:
            warningWindow: 0s
            errorWindow: 0s
            checkInterval: 0s
        internode:
            # This server section configures the TLS certificate that internal temporal
            # cluster nodes (history or matching) present to other clients within the Temporal Cluster.
            server:
                requireClientAuth: false

                certFile: 
                keyFile: 

                certData: 
                keyData: 

            # This client section is used to configure the TLS clients within
            # the Temporal Cluster that connect to an Internode (history or matching)
            client:
                serverName: 
                disableHostVerification: false
        frontend:
            # This server section configures the TLS certificate that the Frontend
            # server presents to all clients (specifically the Worker role within
            # the Temporal Cluster and all External SDKs connecting to the Cluster)
            server:
                requireClientAuth: false
                certFile: 
                keyFile: 

                certData: 
                keyData: 

            # This client section is used to configure the TLS clients within
            # the Temporal Cluster (specifically the Worker role) that connect to the Frontend service
            client:
                serverName: 
                disableHostVerification: false
    authorization:
        jwtKeyProvider:
            keySourceURIs:
            refreshInterval: 1m
        permissionsClaimName: permissions
        authorizer: 
        claimMapper: 
services:
    frontend:
        rpc:
            grpcPort: 7233
            membershipPort: 6933
            # bindOnIP: 172.21.0.5
            bindOnLocalHost: true

    matching:
        rpc:
            grpcPort: 7235
            membershipPort: 6935
            # bindOnIP: 172.21.0.5
            bindOnLocalHost: true

    history:
        rpc:
            grpcPort: 7234
            membershipPort: 6934
            # bindOnIP: 172.21.0.5
            bindOnLocalHost: true

    worker:
        rpc:
            grpcPort: 7239
            membershipPort: 6939
            # bindOnIP: 172.21.0.5
            bindOnLocalHost: true

clusterMetadata:
    enableGlobalNamespace: false
    failoverVersionIncrement: 10
    masterClusterName: "active"
    currentClusterName: "active"
    clusterInformation:
        active:
            enabled: true
            initialFailoverVersion: 1
            rpcName: "frontend"
            rpcAddress: 127.0.0.1:7233

dcRedirectionPolicy:
    policy: "noop"
    toDC: ""

archival:
  history:
    state: "enabled"
    enableRead: true
    provider:
      filestore:
        fileMode: "0666"
        dirMode: "0766"
  visibility:
    state: "enabled"
    enableRead: true
    provider:
      filestore:
        fileMode: "0666"
        dirMode: "0766"

namespaceDefaults:
  archival:
    history:
      state: "disabled"
      URI: "file:///tmp/temporal_archival/development"
    visibility:
      state: "disabled"
      URI: "file:///tmp/temporal_vis_archival/development"

publicClient:
    hostPort: "172.21.0.5:7233"

dynamicConfigClient:
    filepath: "config/dynamicconfig/development_sql.yaml"
    pollInterval: "60s"

I updated the hostPort above to use

publicClient:
    hostPort: "127.0.0.1:7233"

But I was still getting the same error.

Still trying to figure out the issue. The docker-compose you pasted sometimes uses host.docker.internal, sometimes temporal:port, and sometimes 127.0.0.1. I think it's most likely a combination of these things that's causing the confusion.
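One way to make the addressing consistent (just an illustration, not tested with this exact setup) would be to point the web and UI containers at the frontend by its compose service name, since Docker's embedded DNS resolves service names on the shared bridge network:

  temporal-web:
    # ...rest of the service definition unchanged
    environment:
      - TEMPORAL_GRPC_ENDPOINT=temporal-frontend:7233
  temporal-ui:
    environment:
      - TEMPORAL_ADDRESS=temporal-frontend:7233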

Have you seen the recommended setup here for using the temporalio/server Docker image in prod? Maybe that would be a better option.

I don't think I am using temporal:port in the docker-compose. host.docker.internal is only used to connect to Postgres on the host machine.
The link you mentioned does not have any docker-compose example that runs each service in a separate Docker container.
I am not sure why binding to localhost is not working. I would be happy to use an existing docker-compose (if there is one) that can spawn a container for each service.

I would be happy to use an existing docker-compose (if there is one) that can spawn a container for each service.

I am not aware of one that exists; all the examples in the docker-compose repo use the auto-setup image.
I will work on creating the one you need and get back to you here.

Thanks, that would be great. Since it is recommended not to use auto-setup in production, I was using a local Postgres to try this out. That is why I was working on creating a production-ready docker-compose.


Finally, I was able to make it work. The only issue is that I had to manually go into each Docker container (matching, worker, history, frontend) to get its IP address using ifconfig,
e.g.:

history -> 172.21.0.2
matching -> 172.21.0.3
frontend -> 172.21.0.4
worker -> 172.21.0.5

Initially I had an incorrect configuration; I ran docker-compose up to see where each service was starting and was able to get those IPs.

Now the question is: is there a way to get these IP addresses automatically, so that this setup works every time?

Working temporal-docker-compose/docker-compose.yaml

version: "3.5"
services:
  temporal-history:
    container_name: temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=history
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=172.21.0.2
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7234:7234
    volumes:
      - ./config:/etc/temporal/config

  temporal-matching:
    container_name: temporal-matching
    depends_on:
      - temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=matching
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=172.21.0.3
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7235:7235
    volumes:
      - ./config:/etc/temporal/config

  temporal-frontend:
    container_name: temporal-frontend
    depends_on:
      - temporal-matching
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=frontend
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=172.21.0.4
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7233:7233
    volumes:
      - ./config:/etc/temporal/config

  temporal-worker:
    container_name: temporal-worker
    depends_on:
      - temporal-frontend
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=worker
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=172.21.0.5
      - PUBLIC_FRONTEND_ADDRESS=172.21.0.4:7233
    image: temporalio/server:latest
    networks:
      - temporal-network
    ports:
      - 7239:7239
    volumes:
      - ./config:/etc/temporal/config
  
  temporal-admin-tools:
    container_name: temporal-admin-tools
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_CLI_ADDRESS=temporal:7233
    image: temporalio/admin-tools:${TEMPORAL_VERSION}
    networks:
      - temporal-network
    stdin_open: true
    tty: true
 
  temporal-ui:
    container_name: temporal-ui
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_ADDRESS=172.21.0.4:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
    image: temporalio/ui:${TEMPORAL_UI_VERSION}
    networks:
      - temporal-network
    ports:
      - 8080:8080

  temporal-web:
    container_name: temporal-web
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_GRPC_ENDPOINT=172.21.0.4:7233
      - TEMPORAL_PERMIT_WRITE_API=true
    image: temporalio/web:${TEMPORAL_WEB_VERSION}
    networks:
      - temporal-network
    ports:
      - 8088:8088
networks:
  temporal-network:
    driver: bridge
    name: temporal-network

In the same directory I also had a temporal-docker-compose/.env file with the details below:

COMPOSE_PROJECT_NAME=temporal
CASSANDRA_VERSION=3.11.9
ELASTICSEARCH_VERSION=7.16.2
MYSQL_VERSION=8
POSTGRESQL_VERSION=13
TEMPORAL_VERSION=1.16.1
TEMPORAL_WEB_VERSION=1.15.0
TEMPORAL_UI_VERSION=0.10.4

temporal-docker-compose/config/config-template.yaml

log:
    stdout: true
    level: {{ default .Env.LOG_LEVEL "info" }}

persistence:
    numHistoryShards: {{ default .Env.NUM_HISTORY_SHARDS "4" }}
    defaultStore: default
    visibilityStore: visibility
    {{- $es := default .Env.ENABLE_ES "false" | lower -}}
    {{- if eq $es "true" }}
    advancedVisibilityStore: es-visibility
    {{- end }}
    datastores:
        {{- $db := default .Env.DB "cassandra" | lower -}}
        {{- if eq $db "cassandra" }}
        default:
            cassandra:
                hosts: "{{ default .Env.CASSANDRA_SEEDS "" }}"
                keyspace: "{{ default .Env.KEYSPACE "temporal" }}"
                user: "{{ default .Env.CASSANDRA_USER "" }}"
                password: "{{ default .Env.CASSANDRA_PASSWORD "" }}"
                port: {{ default .Env.CASSANDRA_PORT "9042" }}
                maxConns: {{ default .Env.CASSANDRA_MAX_CONNS "20" }}
                tls:
                    enabled: {{ default .Env.CASSANDRA_TLS_ENABLED "false" }}
                    caFile: {{ default .Env.CASSANDRA_CA "" }}
                    certFile: {{ default .Env.CASSANDRA_CERT "" }}
                    keyFile: {{ default .Env.CASSANDRA_CERT_KEY "" }}
                    caData: {{ default .Env.CASSANDRA_CA_DATA "" }}
                    certData: {{ default .Env.CASSANDRA_CERT_DATA "" }}
                    keyData: {{ default .Env.CASSANDRA_CERT_KEY_DATA "" }}
                    enableHostVerification: {{ default .Env.CASSANDRA_HOST_VERIFICATION "false" }}
                    serverName: {{ default .Env.CASSANDRA_HOST_NAME "" }}
        visibility:
            cassandra:
                {{ $visibility_seeds_default := default .Env.CASSANDRA_SEEDS "" }}
                {{ $visibility_seeds := default .Env.VISIBILITY_CASSANDRA_SEEDS $visibility_seeds_default }}
                {{ $visibility_port_default := default .Env.CASSANDRA_PORT "9042" }}
                {{ $visibility_port := default .Env.VISIBILITY_CASSANDRA_PORT $visibility_port_default }}
                {{ $visibility_user_default := default .Env.CASSANDRA_USER "" }}
                {{ $visibility_user := default .Env.VISIBILITY_CASSANDRA_USER $visibility_user_default }}
                {{ $visibility_pwd_default := default .Env.CASSANDRA_PASSWORD "" }}
                {{ $visibility_pwd := default .Env.VISIBILITY_CASSANDRA_PASSWORD $visibility_pwd_default }}
                hosts: "{{ $visibility_seeds }}"
                keyspace: "{{ default .Env.VISIBILITY_KEYSPACE "temporal_visibility" }}"
                user: "{{ $visibility_user }}"
                password: "{{ $visibility_pwd }}"
                port: {{ $visibility_port }}
                maxConns: {{ default .Env.CASSANDRA_MAX_CONNS "10" }}
                tls:
                    enabled: {{ default .Env.CASSANDRA_TLS_ENABLED "false" }}
                    caFile: {{ default .Env.CASSANDRA_CA "" }}
                    certFile: {{ default .Env.CASSANDRA_CERT "" }}
                    keyFile: {{ default .Env.CASSANDRA_CERT_KEY "" }}
                    caData: {{ default .Env.CASSANDRA_CA_DATA "" }}
                    certData: {{ default .Env.CASSANDRA_CERT_DATA "" }}
                    keyData: {{ default .Env.CASSANDRA_CERT_KEY_DATA "" }}
                    enableHostVerification: {{ default .Env.CASSANDRA_HOST_VERIFICATION "false" }}
                    serverName: {{ default .Env.CASSANDRA_HOST_NAME "" }}
        {{- else if eq $db "mysql" }}
        default:
            sql:
                pluginName: "mysql"
                databaseName: "{{ default .Env.DBNAME "temporal" }}"
                connectAddr: "{{ default .Env.MYSQL_SEEDS "" }}:{{ default .Env.DB_PORT "3306" }}"
                connectProtocol: "tcp"
                user: "{{ default .Env.MYSQL_USER "" }}"
                password: "{{ default .Env.MYSQL_PWD "" }}"
                {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }}
                connectAttributes:
                    tx_isolation: "'READ-COMMITTED'"
                {{- end }}
                maxConns: {{ default .Env.SQL_MAX_CONNS "20" }}
                maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }}
                maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }}
                tls:
                    enabled: {{ default .Env.SQL_TLS_ENABLED "false" }}
                    caFile: {{ default .Env.SQL_CA "" }}
                    certFile: {{ default .Env.SQL_CERT "" }}
                    keyFile: {{ default .Env.SQL_CERT_KEY "" }}
                    enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }}
                    serverName: {{ default .Env.SQL_HOST_NAME "" }}
        visibility:
            sql:                
                {{ $visibility_seeds_default := default .Env.MYSQL_SEEDS "" }}
                {{ $visibility_seeds := default .Env.VISIBILITY_MYSQL_SEEDS $visibility_seeds_default }}
                {{ $visibility_port_default := default .Env.DB_PORT "3306" }}
                {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }}
                {{ $visibility_user_default := default .Env.MYSQL_USER "" }}
                {{ $visibility_user := default .Env.VISIBILITY_MYSQL_USER $visibility_user_default }}
                {{ $visibility_pwd_default := default .Env.MYSQL_PWD "" }}
                {{ $visibility_pwd := default .Env.VISIBILITY_MYSQL_PWD $visibility_pwd_default }}
                pluginName: "mysql"
                databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}"
                connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}"
                connectProtocol: "tcp"
                user: "{{ $visibility_user }}"
                password: "{{ $visibility_pwd }}"
                {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }}
                connectAttributes:
                    tx_isolation: "'READ-COMMITTED'"
                {{- end }}
                maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }}
                maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }}
                maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }}
                tls:
                    enabled: {{ default .Env.SQL_TLS_ENABLED "false" }}
                    caFile: {{ default .Env.SQL_CA "" }}
                    certFile: {{ default .Env.SQL_CERT "" }}
                    keyFile: {{ default .Env.SQL_CERT_KEY "" }}
                    enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }}
                    serverName: {{ default .Env.SQL_HOST_NAME "" }}
        {{- else if eq $db "postgresql" }}
        default:
            sql:
                pluginName: "postgres"
                databaseName: "{{ default .Env.DBNAME "temporal" }}"
                connectAddr: "{{ default .Env.POSTGRES_SEEDS "" }}:{{ default .Env.DB_PORT "5432" }}"
                connectProtocol: "tcp"
                user: "{{ default .Env.POSTGRES_USER "" }}"
                password: "{{ default .Env.POSTGRES_PWD "" }}"
                maxConns: {{ default .Env.SQL_MAX_CONNS "20" }}
                maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }}
                maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }}
                tls:
                    enabled: {{ default .Env.SQL_TLS_ENABLED "false" }}
                    caFile: {{ default .Env.SQL_CA "" }}
                    certFile: {{ default .Env.SQL_CERT "" }}
                    keyFile: {{ default .Env.SQL_CERT_KEY "" }}
                    enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }}
                    serverName: {{ default .Env.SQL_HOST_NAME "" }}
        visibility:
            sql:
                {{ $visibility_seeds_default := default .Env.POSTGRES_SEEDS "" }}
                {{ $visibility_seeds := default .Env.VISIBILITY_POSTGRES_SEEDS $visibility_seeds_default }}
                {{ $visibility_port_default := default .Env.DB_PORT "5432" }}
                {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }}
                {{ $visibility_user_default := default .Env.POSTGRES_USER "" }}
                {{ $visibility_user := default .Env.VISIBILITY_POSTGRES_USER $visibility_user_default }}
                {{ $visibility_pwd_default := default .Env.POSTGRES_PWD "" }}
                {{ $visibility_pwd := default .Env.VISIBILITY_POSTGRES_PWD $visibility_pwd_default }}
                pluginName: "postgres"
                databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}"
                connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}"
                connectProtocol: "tcp"
                user: "{{ $visibility_user }}"
                password: "{{ $visibility_pwd }}"
                maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }}
                maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }}
                maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }}
                tls:
                    enabled: {{ default .Env.SQL_TLS_ENABLED "false" }}
                    caFile: {{ default .Env.SQL_CA "" }}
                    certFile: {{ default .Env.SQL_CERT "" }}
                    keyFile: {{ default .Env.SQL_CERT_KEY "" }}
                    enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }}
                    serverName: {{ default .Env.SQL_HOST_NAME "" }}
        {{- end }}
        {{- if eq $es "true" }}
        es-visibility:
            elasticsearch:
                version: {{ default .Env.ES_VERSION "" }}
                url:
                    scheme: {{ default .Env.ES_SCHEME "http" }}
                    host: "{{ default .Env.ES_SEEDS "" }}:{{ default .Env.ES_PORT "9200" }}"
                username: "{{ default .Env.ES_USER "" }}"
                password: "{{ default .Env.ES_PWD "" }}"
                indices:
                    visibility: "{{ default .Env.ES_VIS_INDEX "temporal_visibility_v1_dev" }}"
        {{- end }}

global:
    membership:
        maxJoinDuration: 30s
        broadcastAddress: "{{ default .Env.TEMPORAL_BROADCAST_ADDRESS "" }}"
    pprof:
        port: {{ default .Env.PPROF_PORT "0" }}
    tls:
        refreshInterval: {{ default .Env.TEMPORAL_TLS_REFRESH_INTERVAL "0s" }}
        expirationChecks:
            warningWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_WARNING_WINDOW "0s" }}
            errorWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_ERROR_WINDOW "0s" }}
            checkInterval: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_CHECK_INTERVAL "0s" }}
        internode:
            # This server section configures the TLS certificate that internal temporal
            # cluster nodes (history or matching) present to other clients within the Temporal Cluster.
            server:
                requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }}

                certFile: {{ default .Env.TEMPORAL_TLS_SERVER_CERT "" }}
                keyFile: {{ default .Env.TEMPORAL_TLS_SERVER_KEY "" }}
                {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }}
                clientCaFiles:
                    - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }}
                {{- end }}

                certData: {{ default .Env.TEMPORAL_TLS_SERVER_CERT_DATA "" }}
                keyData: {{ default .Env.TEMPORAL_TLS_SERVER_KEY_DATA "" }}
                {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }}
                clientCaData:
                    - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }}
                {{- end }}

            # This client section is used to configure the TLS clients within
            # the Temporal Cluster that connect to an Internode (history or matching)
            client:
                serverName: {{ default .Env.TEMPORAL_TLS_INTERNODE_SERVER_NAME "" }}
                disableHostVerification: {{ default .Env.TEMPORAL_TLS_INTERNODE_DISABLE_HOST_VERIFICATION "false"}}
                {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }}
                rootCaFiles:
                    - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }}
                {{- end }}
                {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }}
                rootCaData:
                    - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }}
                {{- end }}
        frontend:
            # This server section configures the TLS certificate that the Frontend
            # server presents to all clients (specifically the Worker role within
            # the Temporal Cluster and all External SDKs connecting to the Cluster)
            server:
                requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }}
                certFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT "" }}
                keyFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY "" }}
                {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT }}
                clientCaFiles:
                    - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT "" }}
                    - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT "" }}
                {{- end }}

                certData: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT_DATA "" }}
                keyData: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY_DATA "" }}
                {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA }}
                clientCaData:
                    - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA "" }}
                    - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT_DATA "" }}
                {{- end }}

            # This client section is used to configure the TLS clients within
            # the Temporal Cluster (specifically the Worker role) that connect to the Frontend service
            client:
                serverName: {{ default .Env.TEMPORAL_TLS_FRONTEND_SERVER_NAME "" }}
                disableHostVerification: {{ default .Env.TEMPORAL_TLS_FRONTEND_DISABLE_HOST_VERIFICATION "false"}}
                {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }}
                rootCaFiles:
                    - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }}
                {{- end }}
                {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }}
                rootCaData:
                    - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }}
                {{- end }}
    {{- if .Env.STATSD_ENDPOINT }}
    metrics:
        statsd:
            hostPort: {{ .Env.STATSD_ENDPOINT }}
            prefix: "temporal"
    {{- else if .Env.PROMETHEUS_ENDPOINT }}
    metrics:
        prometheus:
            timerType: {{ default .Env.PROMETHEUS_TIMER_TYPE "histogram" }}
            listenAddress: "{{ .Env.PROMETHEUS_ENDPOINT }}"
    {{- end }}
    authorization:
        jwtKeyProvider:
            keySourceURIs:
                {{- if .Env.TEMPORAL_JWT_KEY_SOURCE1 }}
                - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE1 "" }}
                {{- end }}
                {{- if .Env.TEMPORAL_JWT_KEY_SOURCE2 }}
                - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE2 "" }}
                {{- end }}
            refreshInterval: {{ default .Env.TEMPORAL_JWT_KEY_REFRESH "1m" }}
        permissionsClaimName: {{ default .Env.TEMPORAL_JWT_PERMISSIONS_CLAIM "permissions" }}
        authorizer: {{ default .Env.TEMPORAL_AUTH_AUTHORIZER "" }}
        claimMapper: {{ default .Env.TEMPORAL_AUTH_CLAIM_MAPPER "" }}

{{- $temporalGrpcPort := default .Env.FRONTEND_GRPC_PORT "7233" }}
services:
    frontend:
        rpc:
            grpcPort: {{ $temporalGrpcPort }}
            membershipPort: {{ default .Env.FRONTEND_MEMBERSHIP_PORT "6933" }}
            bindOnIP: {{ default .Env.BIND_ON_IP "127.0.0.1" }}

    matching:
        rpc:
            grpcPort: {{ default .Env.MATCHING_GRPC_PORT "7235" }}
            membershipPort: {{ default .Env.MATCHING_MEMBERSHIP_PORT "6935" }}
            bindOnIP: {{ default .Env.BIND_ON_IP "127.0.0.1" }}

    history:
        rpc:
            grpcPort: {{ default .Env.HISTORY_GRPC_PORT "7234" }}
            membershipPort: {{ default .Env.HISTORY_MEMBERSHIP_PORT "6934" }}
            bindOnIP: {{ default .Env.BIND_ON_IP "127.0.0.1" }}

    worker:
        rpc:
            grpcPort: {{ default .Env.WORKER_GRPC_PORT "7239" }}
            membershipPort: {{ default .Env.WORKER_MEMBERSHIP_PORT "6939" }}
            bindOnIP: {{ default .Env.BIND_ON_IP "127.0.0.1" }}

clusterMetadata:
    enableGlobalNamespace: false
    failoverVersionIncrement: 10
    masterClusterName: "active"
    currentClusterName: "active"
    clusterInformation:
        active:
            enabled: true
            initialFailoverVersion: 1
            rpcName: "frontend"
            rpcAddress: {{ (print "127.0.0.1:" $temporalGrpcPort) }}

dcRedirectionPolicy:
    policy: "noop"
    toDC: ""

archival:
  history:
    state: "enabled"
    enableRead: true
    provider:
      filestore:
        fileMode: "0666"
        dirMode: "0766"
  visibility:
    state: "enabled"
    enableRead: true
    provider:
      filestore:
        fileMode: "0666"
        dirMode: "0766"

namespaceDefaults:
  archival:
    history:
      state: "disabled"
      URI: "file:///tmp/temporal_archival/development"
    visibility:
      state: "disabled"
      URI: "file:///tmp/temporal_vis_archival/development"

{{ $publicIp := default .Env.BIND_ON_IP "127.0.0.1" -}}
{{- $defaultPublicHostPost := (print $publicIp ":" $temporalGrpcPort) -}}
publicClient:
    hostPort: "{{ default .Env.PUBLIC_FRONTEND_ADDRESS $defaultPublicHostPost }}"

dynamicConfigClient:
    filepath: "{{ default .Env.DYNAMIC_CONFIG_FILE_PATH "/etc/temporal/config/dynamicconfig" }}"
    pollInterval: "60s"

temporal-docker-compose/config/dynamicconfig/development_sql.yaml

frontend.enableClientVersionCheck:
- value: true
  constraints: {}
history.persistenceMaxQPS:
- value: 3000
  constraints: {}
frontend.persistenceMaxQPS:
- value: 3000
  constraints: {}
frontend.historyMgrNumConns:
- value: 10
  constraints: {}
frontend.throttledLogRPS:
- value: 20
  constraints: {}
history.historyMgrNumConns:
- value: 50
  constraints: {}
history.defaultActivityRetryPolicy:
- value:
    InitialIntervalInSeconds: 1
    MaximumIntervalCoefficient: 100.0
    BackoffCoefficient: 2.0
    MaximumAttempts: 0
history.defaultWorkflowRetryPolicy:
- value:
    InitialIntervalInSeconds: 1
    MaximumIntervalCoefficient: 100.0
    BackoffCoefficient: 2.0
    MaximumAttempts: 0
system.advancedVisibilityWritingMode:
  - value: "off"
    constraints: {}
limit.maxIDLength:
  - value: 255
    constraints: {}
system.forceSearchAttributesCacheRefreshOnRead:
  - value: true # Dev setup only. Please don't turn this on in production.
    constraints: {}

Regarding getting IP addresses for the containers: I am wondering if we can set static IPs for these containers in Docker Compose. Would that be good practice for a production deployment?

The docker-compose file below works for me. It assigns static IPs, so there is no need to look them up from the containers. No changes to the other files.
docker-compose.yaml with static IP addresses:

version: "3.5"
services:
  temporal-history:
    container_name: temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=history
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=10.1.0.2
    image: temporalio/server:latest
    networks:
      temporal-network:
        ipv4_address: 10.1.0.2
    ports:
      - 7234:7234
    volumes:
      - ./config:/etc/temporal/config

  temporal-matching:
    container_name: temporal-matching
    depends_on:
      - temporal-history
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=matching
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=10.1.0.3
    image: temporalio/server:latest
    networks:
      temporal-network:
        ipv4_address: 10.1.0.3
    ports:
      - 7235:7235
    volumes:
      - ./config:/etc/temporal/config

  temporal-frontend:
    container_name: temporal-frontend
    depends_on:
      - temporal-matching
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=frontend
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=10.1.0.4
    image: temporalio/server:latest
    networks:
      temporal-network:
        ipv4_address: 10.1.0.4
    ports:
      - 7233:7233
    volumes:
      - ./config:/etc/temporal/config

  temporal-worker:
    container_name: temporal-worker
    depends_on:
      - temporal-frontend
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=postgres
      # - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=host.docker.internal
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development_sql.yaml
      - SERVICES=worker
      - BIND_ON_IP=0.0.0.0
      - TEMPORAL_BROADCAST_ADDRESS=10.1.0.5
      - PUBLIC_FRONTEND_ADDRESS=10.1.0.4:7233
    image: temporalio/server:latest
    networks:
      temporal-network:
        ipv4_address: 10.1.0.5
    ports:
      - 7239:7239
    volumes:
      - ./config:/etc/temporal/config
  
  temporal-admin-tools:
    container_name: temporal-admin-tools
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_CLI_ADDRESS=10.1.0.4:7233
    image: temporalio/admin-tools:${TEMPORAL_VERSION}
    networks:
      - temporal-network
    stdin_open: true
    tty: true
 
  temporal-ui:
    container_name: temporal-ui
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_ADDRESS=10.1.0.4:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
    image: temporalio/ui:${TEMPORAL_UI_VERSION}
    networks:
      - temporal-network
    ports:
      - 8080:8080

  temporal-web:
    container_name: temporal-web
    depends_on:
      - temporal-worker
    environment:
      - TEMPORAL_GRPC_ENDPOINT=10.1.0.4:7233
      - TEMPORAL_PERMIT_WRITE_API=true
    image: temporalio/web:${TEMPORAL_WEB_VERSION}
    networks:
      - temporal-network
    ports:
      - 8088:8088
networks:
  temporal-network:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.1.0.0/24
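
To bring this up and sanity-check it (just a sketch, assuming the file above is saved as docker-compose.yaml next to the ./config directory and Postgres is reachable at host.docker.internal:5432):

# start all containers in the background
docker compose up -d

# watch the frontend start and join the membership ring
docker compose logs -f temporal-frontend

# check frontend health from the admin-tools container (it already has TEMPORAL_CLI_ADDRESS set)
docker exec temporal-admin-tools tctl cluster health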

Yes, I think currently you will have to know the container IP in a production environment where you cannot use 127.0.0.1, or assign a static IP via ipv4_address in docker-compose as you did.
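
If you do not want to hard-code the address, the container IP can also be looked up at runtime, e.g. (plain Docker, nothing Temporal-specific):

docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' temporal-frontend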

Just to add as a reference: this GitHub repo contains examples of how to deploy each Temporal service role in an independent container via docker compose, as well as a single-node Docker Swarm deployment. Hope it helps.


Hey, can I get the steps to recreate this setup? I don't want to use auto-setup.
I used the repo provided, but I am getting a lot of errors.

What errors are you getting? The readme was missing the Loki plugin install step, and users reported issues running on Windows, which I am looking into.
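
If the Loki part is what is failing for you, the Docker logging driver plugin has to be installed on the host before starting the stack; if I remember correctly, the command from the Grafana docs is:

docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions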