{ config, lib, pkgs, ...}:

with lib;

let
  cfg = config.services.hadoop;
  hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
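
  # Declared once and `inherit`ed into each HDFS service's option set below,
  # so every daemon exposes the same restart-on-rebuild knob. It defaults to
  # false because restarting HDFS daemons on clusters running critical
  # applications is disruptive.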
  restartIfChanged = mkOption {
    type = types.bool;
    description = ''
      Automatically restart the service on config change.
      This can be set to false to defer restarts on clusters running critical applications.
      Please consider the security implications of inadvertently running an older version,
      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
    '';
    default = false;
  };
in
{
  options.services.hadoop.hdfs = {
    namenode = {
      enable = mkEnableOption "HDFS NameNode";
      formatOnInit = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
          For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
          to initialize an HA cluster manually.
        '';
      };
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for namenode
        '';
      };
    };
    datanode = {
      enable = mkEnableOption "HDFS DataNode";
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for datanode
        '';
      };
    };
    journalnode = {
      enable = mkEnableOption "HDFS JournalNode";
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for journalnode
        '';
      };
    };
    zkfc = {
      enable = mkEnableOption "HDFS ZooKeeper failover controller";
      inherit restartIfChanged;
    };
    httpfs = {
      enable = mkEnableOption "HDFS HTTPfs server";
      tempPath = mkOption {
        type = types.path;
        default = "/tmp/hadoop/httpfs";
        description = ''
          HTTPFS_TEMP path used by HTTPFS
        '';
      };
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for HTTPFS
        '';
      };
    };
  };
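
  # A minimal sketch (not a tested configuration) of enabling an ephemeral
  # single-node HDFS with the options declared above; it assumes the site
  # configuration consumed by ./conf.nix is supplied by the other
  # services.hadoop options:
  #
  #   services.hadoop.hdfs = {
  #     namenode = {
  #       enable = true;
  #       formatOnInit = true; # format the fresh namenode on first start
  #     };
  #     datanode.enable = true;
  #   };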

  config = mkMerge [
    (mkIf cfg.hdfs.namenode.enable {
      systemd.services.hdfs-namenode = {
        description = "Hadoop HDFS NameNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.namenode) restartIfChanged;

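        # When formatOnInit is set, format the namenode before the daemon
        # starts. -nonInteractive makes the format abort (rather than prompt)
        # if metadata already exists, and `|| true` keeps an already-formatted
        # namenode from blocking startup.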
        preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
        '');

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-namenode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
          Restart = "always";
        };
      };

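      # Well-known NameNode ports (HTTP, client RPC, and service RPC); opened
      # only when openFirewall is true.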
      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
        9870 # namenode.http-address
        8020 # namenode.rpc-address
        8022 # namenode.servicerpc-address
      ]);
    })
    (mkIf cfg.hdfs.datanode.enable {
      systemd.services.hdfs-datanode = {
        description = "Hadoop HDFS DataNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.datanode) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-datanode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
        9864 # datanode.http.address
        9866 # datanode.address
        9867 # datanode.ipc.address
      ]);
    })
    (mkIf cfg.hdfs.journalnode.enable {
      systemd.services.hdfs-journalnode = {
        description = "Hadoop HDFS JournalNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.journalnode) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-journalnode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
        8480 # dfs.journalnode.http-address
        8485 # dfs.journalnode.rpc-address
      ]);
    })
    (mkIf cfg.hdfs.zkfc.enable {
      systemd.services.hdfs-zkfc = {
        description = "Hadoop HDFS ZooKeeper failover controller";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.zkfc) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-zkfc";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
          Restart = "always";
        };
      };
    })
    (mkIf cfg.hdfs.httpfs.enable {
      systemd.services.hdfs-httpfs = {
        description = "Hadoop httpfs";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.httpfs) restartIfChanged;

        environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;

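        # HTTPFS_TEMP is exported into the unit's environment above; make sure
        # the directory exists before the daemon starts.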
        preStart = ''
          mkdir -p $HTTPFS_TEMP
        '';

        serviceConfig = {
          User = "httpfs";
          SyslogIdentifier = "hdfs-httpfs";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
        14000 # httpfs.http.port
      ]);
    })
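    # All HDFS daemons except HTTPFS run as the shared "hdfs" user; HTTPFS gets
    # its own dedicated user below.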
    (mkIf (
      cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
    ) {
      users.users.hdfs = {
        description = "Hadoop HDFS user";
        group = "hadoop";
        uid = config.ids.uids.hdfs;
      };
    })
    (mkIf cfg.hdfs.httpfs.enable {
      users.users.httpfs = {
        description = "Hadoop HTTPFS user";
        group = "hadoop";
        isSystemUser = true;
      };
    })
  ];
}