forked from xtruder/kubenix
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathflake.nix
112 lines (100 loc) · 3.66 KB
/
flake.nix
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
{
  description = "Kubernetes resource builder using nix";

  inputs = {
    # Shim so non-flake users can still consume this repo via default.nix.
    flake-compat.url = "github:edolstra/flake-compat";
    flake-compat.flake = false;
    flake-utils.url = "github:numtide/flake-utils";
    # FIX: was "nixpgks" (typo). The `follows` target must name an existing
    # top-level input, otherwise flake-utils keeps its own nixpkgs pin
    # instead of reusing ours.
    flake-utils.inputs.nixpkgs.follows = "nixpkgs";
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    devshell.url = "github:numtide/devshell";
    devshell.inputs.nixpkgs.follows = "nixpkgs";
  };

  outputs = {
    self,
    nixpkgs,
    ...
  } @ inputs:
    (inputs.flake-utils.lib.eachSystem ["x86_64-linux"] (
      # To target every default system instead, swap the line above for:
      #inputs.flake-utils.lib.eachDefaultSystem (
      system: let
        # nixpkgs instantiated with this flake's overlay plus devshell's.
        pkgs = import inputs.nixpkgs {
          overlays = [
            self.overlays.default
            inputs.devshell.overlay
          ];
          config.allowUnsupportedSystem = true;
          inherit system;
        };
        inherit (pkgs) lib;

        # Bundle handed to modules via the `kubenix` special argument.
        kubenix = {
          lib = import ./lib {inherit lib pkgs;};
          evalModules = self.evalModules.${system};
          modules = self.nixosModules.kubenix;
        };

        # evalModules with same interface as lib.evalModules and kubenix as
        # special argument. Accepts either a single `module` or a `modules`
        # list; `module` is stripped before forwarding the remaining attrs.
        evalModules = attrs @ {
          module ? null,
          modules ? [module],
          ...
        }: let
          # lib extended with helpers not (yet) upstreamed to nixpkgs.
          lib' = lib.extend (lib: _self: import ./lib/upstreamables.nix {inherit lib pkgs;});
          attrs' = builtins.removeAttrs attrs ["module"];
        in
          lib'.evalModules (lib.recursiveUpdate
            {
              modules =
                modules
                ++ [
                  {
                    config._module.args = {
                      inherit pkgs;
                      name = "default";
                    };
                  }
                ];
              specialArgs = {
                inherit kubenix pkgs;
              };
            }
            attrs');
      in {
        inherit evalModules pkgs;
        jobs = import ./jobs {inherit pkgs;};
        devShells.default = import ./devshell {inherit pkgs inputs;};
        packages = inputs.flake-utils.lib.flattenTree {
          inherit (pkgs) kubernetes kubectl;
        };

        checks = let
          # Lift a testing suite's boolean `success` into a derivation so
          # `nix flake check` builds (and thereby asserts) the result.
          wasSuccess = suite:
            if suite.success
            then pkgs.runCommandNoCC "testing-suite-config-assertions-for-${suite.name}-succeeded" {} "echo success > $out"
            else pkgs.runCommandNoCC "testing-suite-config-assertions-for-${suite.name}-failed" {} "exit 1";
          mkExamples = attrs:
            (import ./examples {inherit evalModules;})
            ({registry = "docker.io/gatehub";} // attrs);
          mkK8STests = attrs:
            (import ./tests {inherit evalModules;})
            ({registry = "docker.io/gatehub";} // attrs);
        in {
          # TODO: access "success" derivation with nice testing utils for nice output
          nginx-example = wasSuccess (mkExamples {}).nginx-deployment.config.testing;
          tests-k8s-1_19 = wasSuccess (mkK8STests {k8sVersion = "1.19";});
          tests-k8s-1_20 = wasSuccess (mkK8STests {k8sVersion = "1.20";});
          tests-k8s-1_21 = wasSuccess (mkK8STests {k8sVersion = "1.21";});
          tests-k8s-1_23 = wasSuccess (mkK8STests {k8sVersion = "1.23";});
        };
      }
    ))
    # Per-system outputs above; system-independent outputs below.
    // {
      nixosModules.kubenix = import ./modules;
      overlays.default = _final: prev: {
        kubenix.evalModules = self.evalModules.${prev.system};
        # up to date versions of their nixpkgs equivalents
        # kubernetes =
        #   prev.callPackage ./pkgs/applications/networking/cluster/kubernetes
        #   {};
        # kubectl = prev.callPackage ./pkgs/applications/networking/cluster/kubectl {};
      };
    };
}