diff --git a/CHANGELOG.md b/CHANGELOG.md index d2c6da18..c024962e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,13 @@ All notable changes to this project will be documented in this file. ## [Unreleased] -## [0.1.0] - 2021-10-27 +- `operator-rs` `0.3.0` → `0.4.0` ([#20]). +- Adapted pod image and container command to docker image ([#20]). +- Adapted documentation to represent new workflow with docker images ([#20]). + +[#20]: https://github.com/stackabletech/hdfs-operator/pull/20 +## [0.1.0] - 2021-10-27 ### Changed - Switched to operator-rs tag 0.3.0 ([#13]) diff --git a/Cargo.lock b/Cargo.lock index d1e9378e..8a8bee0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -117,9 +117,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" dependencies = [ "jobserver", ] @@ -770,9 +770,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.63.1" +version = "0.63.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c19c08adecde7d68052bfccf9f8ae663f680380e297f20249cef7943df66f54" +checksum = "75e877325e5540a3041b519bd7ee27a858691f9f816cf533d652cbb33cbfea45" dependencies = [ "k8s-openapi", "kube-client", @@ -783,9 +783,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.63.1" +version = "0.63.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da7ca26f7b912055aec302c376de4f3e1c749d121fbed91088203848e3dbd978" +checksum = "bb8e1a36f17c63e263ba0ffa2c0658de315c75decad983d83aaeafeda578cc78" dependencies = [ "base64", "bytes", @@ -822,9 +822,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.63.1" +version = "0.63.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56229a53d7ce86e3e31c4aaf18a957b6f68305126ebfb12523312b2c8a43f19c" +checksum = "a91e572d244436fbc0d0b5a4829d96b9d623e08eb6b5d1e80418c1fab10b162a" dependencies = [ "chrono", "form_urlencoded", @@ -839,9 +839,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.63.1" +version = "0.63.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a9c6f93a170382c384eddf05ba165a96b38ecc63db8814d750db2ee349bd89" +checksum = "2034f57f3db36978ef366f45f1e263e623d9a6a8fcc6a6b1ef8879a213e1d2c4" dependencies = [ "darling", "proc-macro2", @@ -852,9 +852,9 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "0.63.1" +version = "0.63.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a168bfeebab8913a0fca198c1f30d8d8c5f04d3eee1645aa5619a27a0a656c14" +checksum = "6018cf8410f9d460be3a3ac35deef63b71c860c368016d7bf6871994343728b4" dependencies = [ "dashmap", "derivative", @@ -880,9 +880,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.105" +version = "0.2.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" +checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" [[package]] name = "libgit2-sys" @@ -1030,9 +1030,9 @@ checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "openssl" -version = "0.10.36" +version = "0.10.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" dependencies = [ "bitflags", "cfg-if", @@ -1050,9 +1050,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.67" +version = "0.9.70" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058" +checksum = "c6517987b3f8226b5da3661dad65ff7f300cc59fb5ea8333ca191fc65fde3edf" dependencies = [ "autocfg", "cc", @@ -1198,8 +1198,8 @@ dependencies = [ [[package]] name = "product-config" -version = "0.2.0-nightly" -source = "git+https://github.com/stackabletech/product-config.git?branch=main#97734dfa78c5e96922b2fc99bbd0cf2a1b7ac89d" +version = "0.2.0" +source = "git+https://github.com/stackabletech/product-config.git?tag=0.2.0#e32e33d9094e09b1af29045e05a4ab17c511cedb" dependencies = [ "java-properties", "regex", @@ -1652,8 +1652,8 @@ dependencies = [ [[package]] name = "stackable-operator" -version = "0.3.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.3.0#a0a1d10260f7921d436a0cd7ba6ce957368e42fb" +version = "0.4.0" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.4.0#50c3ee9564b1d3eb9d6e43c5e87c2102afbacc27" dependencies = [ "async-trait", "backoff", @@ -1789,9 +1789,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -1831,9 +1831,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2dd85aeaba7b68df939bd357c6afb36c87951be9e80bf9c859f2fc3e9fca0fd" +checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" dependencies = [ "proc-macro2", "quote", @@ -1863,9 +1863,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", diff --git a/README.adoc b/README.adoc index aff102cc..328a8be3 100644 --- a/README.adoc +++ b/README.adoc @@ -2,6 +2,4 @@ This is a Kubernetes Operator to manage Apache HDFS ensembles. -It is written by https://www.stackable.de[Stackable] in Rust, and it is supposed to be used with the https://github.com/stackabletech/agent[Stackable Agent] instead of the Kubernetes kubelet. - The docs can be found in the `docs` subdirectory, and they are published together with docs for all other Stackable products at https://docs.stackable.tech. \ No newline at end of file diff --git a/deploy/config-spec/properties.yaml b/deploy/config-spec/properties.yaml index bd67c192..4f7c7785 100644 --- a/deploy/config-spec/properties.yaml +++ b/deploy/config-spec/properties.yaml @@ -7,11 +7,6 @@ spec: examples: - "ldap://[2001:db8::7]/c=GB?objectClass?one" comment: "Specified in https://tools.ietf.org/html/rfc3986#appendix-B" - - unit: &unitDirectory - name: "directory" - regex: "^/|(/[\\w-]+)+$" - examples: - - "/tmp/xyz" properties: - property: @@ -156,22 +151,3 @@ properties: required: true asOfVersion: "0.0.0" description: "The datanode http server address and port." - - ################################################################################################### - # env vars - ################################################################################################### - - property: &javaHome - propertyNames: - - name: "JAVA_HOME" - kind: - type: "env" - datatype: - type: "string" - unit: *unitDirectory - roles: - - name: "namenode" - required: true - - name: "datanode" - required: true - asOfVersion: "0.0.0" - description: "Points to the Java installation folder." 
diff --git a/deploy/crd/hdfscluster.crd.yaml b/deploy/crd/hdfscluster.crd.yaml index 5bc7d2ff..cad5d21f 100644 --- a/deploy/crd/hdfscluster.crd.yaml +++ b/deploy/crd/hdfscluster.crd.yaml @@ -85,9 +85,6 @@ spec: required: - port type: object - javaHome: - nullable: true - type: string metricsPort: format: uint16 minimum: 0.0 @@ -167,9 +164,6 @@ spec: required: - port type: object - javaHome: - nullable: true - type: string metricsPort: format: uint16 minimum: 0.0 @@ -275,9 +269,6 @@ spec: required: - port type: object - javaHome: - nullable: true - type: string metricsPort: format: uint16 minimum: 0.0 @@ -343,9 +334,6 @@ spec: required: - port type: object - javaHome: - nullable: true - type: string metricsPort: format: uint16 minimum: 0.0 diff --git a/docs/modules/ROOT/pages/building.adoc b/docs/modules/ROOT/pages/building.adoc index c6fe7384..f6fd7cef 100644 --- a/docs/modules/ROOT/pages/building.adoc +++ b/docs/modules/ROOT/pages/building.adoc @@ -1,6 +1,6 @@ = Building the Operator This operator is written in Rust. -It is developed against the latest stable Rust release (1.54 at the time of writing). +It is developed against the latest stable Rust release (1.56 at the time of writing). cargo build diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 3824f96c..e6ece9fc 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -2,4 +2,9 @@ This is an operator for Kubernetes that can manage https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html[Apache HDFS] clusters. -WARNING: This operator does _not_ work with containers/container images. It relies on the https://github.com/stackabletech/agent/[Stackable Agent] to run on "bare metal" via systemd. 
+WARNING: This operator only works with images from the https://repo.stackable.tech/#browse/browse:docker:v2%2Fstackable%2Fhadoop[Stackable] repository + +[source] +---- +docker pull docker.stackable.tech/stackable/hadoop:<version> +---- \ No newline at end of file diff --git a/docs/modules/ROOT/pages/usage.adoc b/docs/modules/ROOT/pages/usage.adoc index 217044f6..8cc87bfd 100644 --- a/docs/modules/ROOT/pages/usage.adoc +++ b/docs/modules/ROOT/pages/usage.adoc @@ -18,11 +18,10 @@ To create a single node Apache HDFS (v3.2.2) cluster with Prometheus metrics exp default: selector: matchLabels: - kubernetes.io/arch: stackable-linux + kubernetes.io/os: linux replicas: 1 config: - javaHome: /usr/lib/jvm/java-11-openjdk-amd64/ - dfsNamenodeNameDir: file:///tmp/hdfs/nn + dfsNamenodeNameDir: /stackable/data ipcAddress: port: 9000 httpAddress: @@ -33,11 +32,10 @@ To create a single node Apache HDFS (v3.2.2) cluster with Prometheus metrics exp default: selector: matchLabels: - kubernetes.io/arch: stackable-linux + kubernetes.io/os: linux replicas: 1 config: - javaHome: /usr/lib/jvm/java-11-openjdk-amd64/ - dfsDatanodeNameDir: file:///tmp/hdfs/dn + dfsDatanodeNameDir: /stackable/data httpAddress: port: 61000 metricsPort: 51000 diff --git a/examples/simple-hdfs-cluster.yaml b/examples/simple-hdfs-cluster.yaml index 0de80ae9..8bbddc49 100644 --- a/examples/simple-hdfs-cluster.yaml +++ b/examples/simple-hdfs-cluster.yaml @@ -9,11 +9,10 @@ spec: default: selector: matchLabels: - kubernetes.io/os: stackable-linux + kubernetes.io/os: linux replicas: 1 config: - javaHome: /usr/lib/jvm/java-11-openjdk-amd64/ - dfsNamenodeNameDir: file:///tmp/hdfs/nn + dfsNamenodeNameDir: /stackable/hadoop/namenode/data ipcAddress: port: 9000 httpAddress: @@ -24,11 +23,10 @@ spec: default: selector: matchLabels: - kubernetes.io/os: stackable-linux + kubernetes.io/os: linux replicas: 1 config: - javaHome: /usr/lib/jvm/java-11-openjdk-amd64/ - dfsDatanodeNameDir: 
/stackable/hadoop/datanode/data httpAddress: port: 61000 metricsPort: 51000 diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml index 64c56fb8..265ad7a1 100644 --- a/rust/crd/Cargo.toml +++ b/rust/crd/Cargo.toml @@ -8,7 +8,7 @@ repository = "https://github.com/stackabletech/hdfs-operator" version = "0.2.0-nightly" [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.3.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.4.0" } duplicate = "0.3.0" semver = "1.0" diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 208b69eb..9aa52316 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -46,10 +46,9 @@ pub const DFS_DATA_NODE_HTTP_ADDRESS: &str = "dfs.datanode.http.address"; pub const DFS_DATA_NODE_ADDRESS: &str = "dfs.datanode.address"; pub const METRICS_PORT_PROPERTY: &str = "metricsPort"; -pub const JAVA_HOME: &str = "JAVA_HOME"; pub const CONFIG_MAP_TYPE_DATA: &str = "data"; -pub const CONFIG_DIR_NAME: &str = "conf"; +pub const CONFIG_DIR_NAME: &str = "/stackable/conf"; pub const HDFS_SITE_XML: &str = "hdfs-site.xml"; pub const CORE_SITE_XML: &str = "core-site.xml"; @@ -150,26 +149,26 @@ impl HdfsRole { /// * `version` - Current specified cluster version /// * `auto_format_fs` - Format directory via 'start-namenode' script /// - pub fn get_command(&self, version: &HdfsVersion, auto_format_fs: bool) -> Vec { + pub fn get_command(&self, auto_format_fs: bool) -> Vec { match &self { HdfsRole::DataNode => vec![ - format!("{}/bin/hdfs", version.package_name()), + "bin/hdfs".to_string(), "--config".to_string(), - format!("{{{{configroot}}}}/{}", CONFIG_DIR_NAME), + CONFIG_DIR_NAME.to_string(), "datanode".to_string(), ], HdfsRole::NameNode => { if auto_format_fs { vec![ - format!("{}/stackable/bin/start-namenode", version.package_name()), + "bin/start-namenode".to_string(), "--config".to_string(), - format!("{{{{configroot}}}}/{}", CONFIG_DIR_NAME), + 
CONFIG_DIR_NAME.to_string(), ] } else { vec![ - format!("{}/bin/hdfs", version.package_name()), + "bin/hdfs".to_string(), "--config".to_string(), - format!("{{{{configroot}}}}/{}", CONFIG_DIR_NAME), + CONFIG_DIR_NAME.to_string(), "namenode".to_string(), ] } @@ -236,7 +235,6 @@ impl HasClusterExecutionStatus for HdfsCluster { pub struct NameNodeConfig { pub dfs_namenode_name_dir: Option, pub dfs_replication: Option, - pub java_home: Option, pub metrics_port: Option, pub ipc_address: Option, pub http_address: Option, @@ -247,7 +245,6 @@ pub struct NameNodeConfig { pub struct DataNodeConfig { pub dfs_datanode_name_dir: Option, pub dfs_replication: Option, - pub java_home: Option, pub metrics_port: Option, pub ipc_address: Option, pub http_address: Option, @@ -264,10 +261,6 @@ impl Configuration for NameNodeConfig { ) -> Result>, ConfigError> { let mut result = BTreeMap::new(); - if let Some(java_home) = &self.java_home { - result.insert(JAVA_HOME.to_string(), Some(java_home.to_string())); - } - if let Some(metrics_port) = self.metrics_port { result.insert( METRICS_PORT_PROPERTY.to_string(), @@ -335,10 +328,6 @@ impl Configuration for DataNodeConfig { ) -> Result>, ConfigError> { let mut result = BTreeMap::new(); - if let Some(java_home) = &self.java_home { - result.insert(JAVA_HOME.to_string(), Some(java_home.to_string())); - } - if let Some(metrics_port) = self.metrics_port { result.insert( METRICS_PORT_PROPERTY.to_string(), diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index 9039c518..bf658302 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -9,7 +9,7 @@ version = "0.2.0-nightly" build = "build.rs" [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.3.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.4.0" } stackable-hdfs-crd = { path = "../crd" } stackable-hdfs-operator = { path = "../operator" } 
@@ -19,7 +19,7 @@ tracing = "0.1" [build-dependencies] built = { version = "0.5", features = ["chrono", "git2"] } -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.3.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.4.0" } stackable-hdfs-crd = { path = "../crd" } [package.metadata.deb] diff --git a/rust/operator/Cargo.toml b/rust/operator/Cargo.toml index 37ab9d93..24f95341 100644 --- a/rust/operator/Cargo.toml +++ b/rust/operator/Cargo.toml @@ -8,7 +8,7 @@ repository = "https://github.com/stackabletech/hdfs-operator" version = "0.2.0-nightly" [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.3.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.4.0" } stackable-hdfs-crd = { path = "../crd" } async-trait = "0.1" diff --git a/rust/operator/src/lib.rs b/rust/operator/src/lib.rs index 7c635f58..c0210d22 100644 --- a/rust/operator/src/lib.rs +++ b/rust/operator/src/lib.rs @@ -8,12 +8,13 @@ use stackable_hdfs_crd::discovery::{ }; use stackable_hdfs_crd::{ HdfsAddress, HdfsCluster, HdfsClusterSpec, HdfsRole, HdfsVersion, APP_NAME, CONFIG_DIR_NAME, - CORE_SITE_XML, DATA_PORT, DFS_DATA_NODE_ADDRESS, DFS_DATA_NODE_HTTP_ADDRESS, - DFS_DATA_NODE_IPC_ADDRESS, DFS_NAME_NODE_HTTP_ADDRESS, FS_DEFAULT, HDFS_SITE_XML, HTTP_PORT, - IPC_PORT, METRICS_PORT, METRICS_PORT_PROPERTY, + CORE_SITE_XML, DATA_PORT, DFS_DATA_NODE_ADDRESS, DFS_DATA_NODE_DATA_DIR, + DFS_DATA_NODE_HTTP_ADDRESS, DFS_DATA_NODE_IPC_ADDRESS, DFS_NAME_NODE_HTTP_ADDRESS, + DFS_NAME_NODE_NAME_DIR, FS_DEFAULT, HDFS_SITE_XML, HTTP_PORT, IPC_PORT, METRICS_PORT, + METRICS_PORT_PROPERTY, }; use stackable_operator::builder::{ - ContainerBuilder, ContainerPortBuilder, ObjectMetaBuilder, PodBuilder, + ContainerBuilder, ObjectMetaBuilder, PodBuilder, PodSecurityContextBuilder, VolumeBuilder, }; use stackable_operator::client::Client; use 
stackable_operator::command::materialize_command; @@ -59,6 +60,10 @@ use strum::IntoEnumIterator; use tracing::error; use tracing::{debug, info, trace, warn}; +/// The docker image we default to. This needs to be adapted if the operator does not work +/// with images 0.0.1, 0.1.0 etc. anymore and requires e.g. a new major version like 1(.0.0). +const DEFAULT_IMAGE_VERSION: &str = "0"; + const FINALIZER_NAME: &str = "hdfs.stackable.tech/cleanup"; const ID_LABEL: &str = "hdfs.stackable.tech/id"; const SHOULD_BE_SCRAPED: &str = "monitoring.stackable.tech/should_be_scraped"; @@ -416,6 +421,7 @@ impl HdfsState { let mut ipc_port: Option = None; let mut http_port: Option = None; let mut data_port: Option = None; + let mut data_dir: Option<&String> = None; let spec: &HdfsClusterSpec = &self.context.resource.spec; let version: &HdfsVersion = &spec.version; @@ -433,6 +439,7 @@ impl HdfsState { if file_name == HDFS_SITE_XML && role == &HdfsRole::NameNode => { http_port = HdfsAddress::port(config.get(DFS_NAME_NODE_HTTP_ADDRESS))?; + data_dir = config.get(DFS_NAME_NODE_NAME_DIR); } PropertyNameKind::File(file_name) if file_name == HDFS_SITE_XML && role == &HdfsRole::DataNode => @@ -440,6 +447,7 @@ impl HdfsState { ipc_port = HdfsAddress::port(config.get(DFS_DATA_NODE_IPC_ADDRESS))?; http_port = HdfsAddress::port(config.get(DFS_DATA_NODE_HTTP_ADDRESS))?; data_port = HdfsAddress::port(config.get(DFS_DATA_NODE_ADDRESS))?; + data_dir = config.get(DFS_DATA_NODE_DATA_DIR); } PropertyNameKind::Env => { for (property_name, property_value) in config { @@ -455,15 +463,15 @@ impl HdfsState { HdfsRole::NameNode => { cb.add_env_var( "HDFS_NAMENODE_OPTS".to_string(), - format!("-javaagent:{{{{packageroot}}}}/{}/stackable/bin/jmx_prometheus_javaagent-0.16.1.jar={}:{{{{packageroot}}}}/{}/stackable/conf/jmx_hdfs_namenode.yaml", - version.package_name(), property_value, version.package_name()) + 
format!("-javaagent:/stackable/jmx/jmx_prometheus_javaagent-0.16.1.jar={}:/stackable/jmx/namenode.yaml", + property_value) ); } HdfsRole::DataNode => { cb.add_env_var( "HDFS_DATANODE_OPTS".to_string(), - format!("-javaagent:{{{{packageroot}}}}/{}/stackable/bin/jmx_prometheus_javaagent-0.16.1.jar={}:{{{{packageroot}}}}/{}/stackable/conf/jmx_hdfs_datanode.yaml", - version.package_name(), property_value, version.package_name()) + format!("-javaagent:/stackable/jmx/jmx_prometheus_javaagent-0.16.1.jar={}:/stackable/jmx/datanode.yaml", + property_value) ); } } @@ -476,14 +484,13 @@ impl HdfsState { _ => {} } } - // set bin dir - cb.add_env_var( - "HDFS_BIN_DIR", - format!("{{{{packageroot}}}}/{}/bin", version.package_name()), - ); - cb.image(format!("hadoop:{}", version.to_string())); - cb.command(role.get_command(version, spec.auto_format_fs.unwrap_or(true))); + cb.image(format!( + "docker.stackable.tech/stackable/hadoop:{}-stackable{}", + version.to_string(), + DEFAULT_IMAGE_VERSION + )); + cb.command(role.get_command(spec.auto_format_fs.unwrap_or(true))); let pod_name = name_utils::build_resource_name( pod_id.app(), @@ -503,10 +510,13 @@ impl HdfsState { ); recommended_labels.insert(ID_LABEL.to_string(), pod_id.id().to_string()); + let mut pod_builder = PodBuilder::new(); + // One mount for the config directory if let Some(config_map_data) = config_maps.get(CM_TYPE_CONFIG) { if let Some(name) = config_map_data.metadata.name.as_ref() { - cb.add_configmapvolume(name, CONFIG_DIR_NAME.to_string()); + cb.add_volume_mount("config", CONFIG_DIR_NAME); + pod_builder.add_volume(VolumeBuilder::new("config").with_config_map(name).build()); } else { return Err(error::Error::MissingConfigMapNameError { cm_type: CM_TYPE_CONFIG, @@ -519,45 +529,47 @@ impl HdfsState { }); } + // One mount for the data directory + if let Some(dir) = data_dir { + cb.add_volume_mount("data", dir); + pod_builder.add_volume( + VolumeBuilder::new("data") + .with_host_path(dir, 
Some("DirectoryOrCreate".to_string())) + .build(), + ); + // we need to create this as root, otherwise we have no permissions for the host path. + pod_builder.security_context( + PodSecurityContextBuilder::new() + .run_as_user(0) + .fs_group(0) + .run_as_group(0) + .build(), + ); + } + let mut annotations = BTreeMap::new(); // only add metrics container port and annotation if available if let Some(metrics_port) = metrics_port { annotations.insert(SHOULD_BE_SCRAPED.to_string(), "true".to_string()); - cb.add_container_port( - ContainerPortBuilder::new(metrics_port.parse()?) - .name(METRICS_PORT) - .build(), - ); + cb.add_container_port(METRICS_PORT, metrics_port.parse()?); } // add ipc port if available if let Some(ipc_port) = ipc_port { - cb.add_container_port( - ContainerPortBuilder::new(ipc_port.parse()?) - .name(IPC_PORT) - .build(), - ); + cb.add_container_port(IPC_PORT, ipc_port.parse()?); } // add http port if available if let Some(http_port) = http_port { - cb.add_container_port( - ContainerPortBuilder::new(http_port.parse()?) - .name(HTTP_PORT) - .build(), - ); + cb.add_container_port(HTTP_PORT, http_port.parse()?); } // add data port if available if let Some(data_port) = data_port { - cb.add_container_port( - ContainerPortBuilder::new(data_port.parse()?) - .name(DATA_PORT) - .build(), - ); + cb.add_container_port(DATA_PORT, data_port.parse()?); } - let pod = PodBuilder::new() + let pod = pod_builder .metadata( ObjectMetaBuilder::new() .generate_name(pod_name) @@ -567,9 +579,10 @@ impl HdfsState { .ownerreference_from_resource(&self.context.resource, Some(true), Some(true))? .build()?, ) - .add_stackable_agent_tolerations() .add_container(cb.build()) .node_name(node_name) + // TODO: first iteration we are using host network + .host_network(true) .build()?; Ok(self.context.client.create(&pod).await?)