docker-hadoop ResourceManager not launching

  docker, docker-compose, hadoop, java

I am trying to run Hadoop with docker-compose. I found this https://github.com/big-data-europe/docker-hadoop, which I changed a bit to fit my purposes. It mostly works, but the ResourceManager doesn't start. Below are both the Hadoop service definitions from my docker-compose file and the error message from the ResourceManager logs. Because of this, MapReduce doesn't work and I can't continue my work.

version: '3.9'

services:
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    domainname: namenode
    hostname: namenode
    ports:
      # Quoted to avoid YAML 1.1 scalar-typing surprises on HOST:CONTAINER pairs.
      - "9870:9870"
      - "9001:9000"
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=twitter-data
    env_file:
      - ./hadoop.env

  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode
    # FIX: was `hostname: localhost`. The datanode registered itself with the
    # NameNode under "localhost", so every other container resolved that name
    # to itself instead of to the datanode. HDFS block writes then failed with
    # "could only be written to 0 of the 1 minReplication nodes ... 1 node(s)
    # are excluded", which is exactly the FATAL error aborting ResourceManager
    # startup (it cannot write its state to /rmstate on HDFS).
    hostname: datanode
    ports:
      - "9864:9864"
    volumes:
      - hadoop_datanode:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    env_file:
      - ./hadoop.env

  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: resourcemanager
    ports:
      - "8088:8088"
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864"
    env_file:
      - ./hadoop.env

  nodemanager:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    container_name: nodemanager
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 resourcemanager:8088"
    env_file:
      - ./hadoop.env

  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: historyserver
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 resourcemanager:8088"
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    env_file:
      - ./hadoop.env

# FIX: removed the stray `...` document-end marker that appeared here.
# A top-level key after `...` without a new `---` is invalid YAML.
volumes:
  hadoop_namenode:
  hadoop_datanode:
  hadoop_historyserver:
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422),
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165),
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157),
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95),
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359),
    at com.sun.proxy.$Proxy87.addBlock(Unknown Source),
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1081),
    at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1866),
    at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1668),
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716),
2021-08-15 09:45:15,425 ERROR delegation.AbstractDelegationTokenSecretManager: ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted,
2021-08-15 09:45:15,428 INFO handler.ContextHandler: Stopped [email protected]{/,null,UNAVAILABLE}{/cluster},
2021-08-15 09:45:15,432 INFO server.AbstractConnector: Stopped [email protected]{HTTP/1.1,[http/1.1]}{0.0.0.0:8088},
2021-08-15 09:45:15,433 INFO handler.ContextHandler: Stopped [email protected]{/static,jar:file:/opt/hadoop-3.2.1/share/hadoop/yarn/hadoop-yarn-common-3.2.1.jar!/webapps/static,UNAVAILABLE},
2021-08-15 09:45:15,433 INFO handler.ContextHandler: Stopped [email protected]{/logs,file:///opt/hadoop-3.2.1/logs/,UNAVAILABLE},
2021-08-15 09:45:15,536 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,536 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,536 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,536 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,537 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,537 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,537 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,537 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,537 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,538 INFO event.AsyncDispatcher: AsyncDispatcher is draining to stop, ignoring any new events.,
2021-08-15 09:45:15,538 INFO ipc.Server: Stopping server on 8033,
2021-08-15 09:45:15,538 INFO ipc.Server: Stopping IPC Server listener on 8033,
2021-08-15 09:45:15,538 INFO ipc.Server: Stopping IPC Server Responder,
2021-08-15 09:45:15,538 INFO resourcemanager.ResourceManager: Transitioning to standby state,
2021-08-15 09:45:15,538 INFO resourcemanager.ResourceManager: Transitioned to standby state,
2021-08-15 09:45:15,538 FATAL resourcemanager.ResourceManager: Error starting ResourceManager,
org.apache.hadoop.service.ServiceStateException: org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /rmstate/FSRMStateRoot/RMVersionNode.tmp could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.,
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2219),
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:294),
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2789),
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:892),
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:574),
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java),
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:528),
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1070),
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:999),
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:927),
    at java.security.AccessController.doPrivileged(Native Method),
    at javax.security.auth.Subject.doAs(Subject.java:422),
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730),
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2915),
,
    at org.apache.hadoop.service.ServiceStateException.convert(ServiceStateException.java:105),
    at org.apache.hadoop.service.AbstractService.start(AbstractService.java:203),
    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.startActiveServices(ResourceManager.java:1262),
    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$1.run(ResourceManager.java:1303),
    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$1.run(ResourceManager.java:1299),
    at java.security.AccessController.doPrivileged(Native Method),
    at javax.security.auth.Subject.doAs(Subject.java:422),
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730),
    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.transitionToActive(ResourceManager.java:1299),
    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceStart(ResourceManager.java:1350),
    at org.apache.hadoop.service.AbstractService.start(AbstractService.java:194),
    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.main(ResourceManager.java:1535),
Caused by: org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /rmstate/FSRMStateRoot/RMVersionNode.tmp could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.,
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2219),
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:294),
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2789),
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:892),
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:574),
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java),
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:528),
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1070),
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:999),
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:927),
    at java.security.AccessController.doPrivileged(Native Method),
    at javax.security.auth.Subject.doAs(Subject.java:422),
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730),
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2915),
,
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1545),
    at org.apache.hadoop.ipc.Client.call(Client.java:1491),
    at org.apache.hadoop.ipc.Client.call(Client.java:1388),
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:233),
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:118),
    at com.sun.proxy.$Proxy86.addBlock(Unknown Source),
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:517),
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method),
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62),
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43),
    at java.lang.reflect.Method.invoke(Method.java:498),
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422),
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165),
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157),
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95),
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359),
    at com.sun.proxy.$Proxy87.addBlock(Unknown Source),
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1081),
    at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1866),
    at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1668),
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716),
2021-08-15 09:45:15,541 INFO resourcemanager.ResourceManager: SHUTDOWN_MSG: ,
/************************************************************,
SHUTDOWN_MSG: Shutting down ResourceManager at 5c9e23d4e7cb/172.22.0.5,
************************************************************/

Source: Docker Questions

LEAVE A COMMENT