.envrc-aws-common.sh
#!/usr/bin/env bash

# Load the shared cluster helpers: from the pinned release by default,
# or from a local checkout when local_SNAPSHOT is set.
if [ -z "${local_SNAPSHOT}" ]; then
    # shellcheck disable=SC2148 source=/.envrc-clusters.sh
    source_url "https://raw.githubusercontent.com/sourcesense/envrc-framework/v0.19.1/.envrc-clusters.sh" "sha256-NSm9CFIhxnqTZ41sWfGu9OWl9bxvD9I4V5_IPl38OOA="
else
    # shellcheck disable=SC1091 source="${local_SNAPSHOT}"/.envrc-clusters.sh
    source "${local_SNAPSHOT}"/.envrc-clusters.sh
fi

# Load the direnv stdlib; abort if direnv is not installed.
if type direnv >/dev/null 2>&1; then
    # shellcheck disable=SC1091
    . <(direnv stdlib)
else
    echo "Could not load direnv stdlib" >&2
    exit 1
fi
# Select AWS as the cloud provider (helper provided by .envrc-clusters.sh).
use_cp aws
# Hook run before working on a cluster: clear any pod overrides.
pre_work_on_cluster()
{
    export POD_OVERRIDES=''
}

# No VPN check is performed for AWS clusters.
test_vpn()
{
    log "No check on VPN"
}
# Configuration setters, meant to be called from an environment's .envrc.
set_region()
{
    local resource_region="$1"
    export RESOURCE_REGION="$resource_region"
}

set_aws_account_id()
{
    local aws_account_id="$1"
    export AWS_ACCOUNT_ID="$aws_account_id"
}

set_cluster_name()
{
    local cluster_name="$1"
    export CLUSTER_NAME="$cluster_name"
}

# Derive the AWS CLI profile name from the cluster coordinates.
set_aws_profile()
{
    export AWS_PROFILE="$CLUSTER_NAME-$CLUSTER_REGION-$AWS_ACCOUNT_ID"
}
# Fetch EKS credentials via the AWS CLI and write them to $KUBECONFIG.
get_credentials()
{
    clusterName="${CLUSTER_NAME?Must specify cluster name in CLUSTER_NAME}"
    clusterRegion="${CLUSTER_REGION?Must specify cluster region in CLUSTER_REGION}"
    kubeConfig="${KUBECONFIG?Must specify kube config in KUBECONFIG}"

    log "Putting credentials for cluster $(ab "${clusterName}") in kubeconfig file $(ab "${kubeConfig/$HOME/\~}"), it could take a while, please be patient and ignore direnv warnings..."
    KUBECONFIG=$kubeConfig aws eks update-kubeconfig --region "${clusterRegion}" --name "${clusterName}" --alias "${clusterName}" 2>/dev/null

    if [ -s "${kubeConfig}" ]; then
        log "Successfully got credentials from AWS and created kubeconfig: $(ab "${kubeConfig/$HOME/\~}")"
    else
        whine "Couldn't get credentials from AWS, please retry. Aborting"
    fi
}
ensure_logged_in()
{
    # Nothing to do in interactive mode.
    # Left as an extension point for non-interactive mode.
    :
}
# Build (or reuse) a per-cluster kubeconfig under ~/.kube/profiles/aws and,
# when NAMESPACE is set, a namespace-specific copy of it.
setup_kubeconfig()
{
    parentDir="$HOME/.kube/profiles/aws"
    mkdir -p "$parentDir"
    KUBECONFIG="$parentDir/${AWS_SSO_ID:-$AWS_ACCOUNT_ID}-${CLUSTER_NAME}"

    if [ ! -s "${KUBECONFIG}" ]; then
        get_credentials
        chmod go-r "${KUBECONFIG}"
    fi

    if [ -n "${NAMESPACE}" ]; then
        namespaceKubeconfig="${KUBECONFIG}-${NAMESPACE}"
        if [ ! -f "${namespaceKubeconfig}" ]; then
            yq e ".contexts[].context.namespace=\"${NAMESPACE}\"" "${KUBECONFIG}" >"${namespaceKubeconfig}"
            chmod go-r "${namespaceKubeconfig}"
            log "Successfully created env specific kubeconfig: $(ab "${namespaceKubeconfig/$HOME/\~}")"
        fi
        KUBECONFIG="${namespaceKubeconfig}"
    fi
    export KUBECONFIG

    ensure_logged_in

    # Verify the cluster is reachable before handing control back to direnv.
    status=$(kubectl version -o json 2>/dev/null | jq -r ".serverVersion.gitVersion")
    [ "$status" = "null" ] && whine "Cannot connect to cluster $(ab "${CLUSTER_NAME}"). Try removing your kubeconfig file $(ab "${KUBECONFIG/$HOME/\~}")"
}
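
# Illustrative usage (hypothetical, not part of this file): a downstream .envrc
# would typically call the setters above and then setup_kubeconfig, e.g.
#
#   set_aws_account_id "123456789012"   # example value
#   set_cluster_name "my-cluster"       # example value
#   set_aws_profile                     # assumes CLUSTER_REGION is already set
#   setup_kubeconfig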