
Conversation


@dotcs dotcs commented Jan 27, 2020

This PR adds a separate docker-compose file with multiple data-nodes configured.

Start the cluster with

docker-compose --file docker-compose-3dn.yml up

It then lists three data-nodes.

[screenshot: cluster overview listing three data-nodes]
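
For a quick check from the command line, a minimal sketch (assuming the namenode container name used in this compose file) would be:

docker exec namenode hdfs dfsadmin -report | grep "Live datanodes"

The NameNode web UI on http://localhost:9870 shows the same information.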

@dotcs dotcs (Author) commented Jan 27, 2020

This PR addresses #40.

@mesosinfo

Hello. I used your multi-data-node yml file, but the ResourceManager container gets killed after docker-compose comes up.
Please check it.

@forresty

> Hello. I used your multi-data-node yml file, but the ResourceManager container gets killed after docker-compose comes up. Please check it.

@mesosinfo you probably need to prune your existing volumes (maybe the namenode volume is enough)?

@webtaken

> Hello. I used your multi-data-node yml file, but the ResourceManager container gets killed after docker-compose comes up. Please check it.

Same issue here.

@webtaken

> Hello. I used your multi-data-node yml file, but the ResourceManager container gets killed after docker-compose comes up. Please check it.
>
> @mesosinfo you probably need to prune your existing volumes (maybe the namenode volume is enough)?

How can I do that? Can you share some commands?
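
For reference, a minimal sketch of pruning the volumes, assuming the stack was started from this repository's directory (Docker Compose prefixes volume names with the project/directory name, here assumed to be docker-hadoop) and that the stored HDFS data can be discarded:

# stop the stack and see which volumes it created
docker-compose --file docker-compose-3dn.yml down
docker volume ls | grep hadoop

# remove the namenode volume (adjust the prefix to your project name);
# remove the datanode volumes as well for a completely fresh start
docker volume rm docker-hadoop_hadoop_namenode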

@webtaken

> Hello. I used your multi-data-node yml file, but the ResourceManager container gets killed after docker-compose comes up. Please check it.

Hey, I changed the docker-compose-3dn.yml file a little bit to the following, and it works now. Hope it's useful:

version: "3"

services:
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    restart: always
    ports:
      - 9870:9870
      - 9000:9000
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop.env

  datanode1:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode1
    restart: always
    volumes:
      - hadoop_datanode1:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870"
    env_file:
      - ./hadoop.env
    depends_on:
      - namenode

  datanode2:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode2
    restart: always
    volumes:
      - hadoop_datanode2:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870"
    env_file:
      - ./hadoop.env
    depends_on:
      - namenode

  datanode3:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode3
    restart: always
    volumes:
      - hadoop_datanode3:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870"
    env_file:
      - ./hadoop.env
    depends_on:
      - namenode

  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: resourcemanager
    # command: hdfs dfsadmin -safemode leave && yarn resourcemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 datanode3:9864"
    env_file:
      - ./hadoop.env
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3

  nodemanager1:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    container_name: nodemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 datanode3:9864 resourcemanager:8088"
    env_file:
      - ./hadoop.env
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3

  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: historyserver
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 datanode3:9864 resourcemanager:8088"
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    env_file:
      - ./hadoop.env
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3

volumes:
  hadoop_namenode:
  hadoop_datanode1:
  hadoop_datanode2:
  hadoop_datanode3:
  hadoop_historyserver:
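
For completeness, one way to apply the modified file and confirm that the ResourceManager stays up, assuming the content above is saved as docker-compose-3dn.yml next to hadoop.env:

# recreate the stack with the modified file
docker-compose --file docker-compose-3dn.yml down
docker-compose --file docker-compose-3dn.yml up -d

# resourcemanager should stay in the "Up" state instead of being killed
docker ps --filter "name=resourcemanager"
docker logs --tail 20 resourcemanager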

