tests/e2e: Add a full lifecycle e2e test and restructure

- new full lifecycle test also tests stop/start functionality
- cleaned up a bit
pull/212/head
iwilltry42 4 years ago
parent 141109defc
commit f9862be237
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
  1. 28
      tests/common.sh
  2. 1
      tests/dind.sh
  3. 10
      tests/test_basic.sh
  4. 51
      tests/test_full_lifecycle.sh
  5. 20
      tests/test_multi_master.sh

@@ -31,7 +31,7 @@ abort() {
}
# command_exists checks whether a command is available on PATH.
# Arguments: $1 - command name to look up
# Returns:   0 if found, non-zero otherwise
command_exists() {
  # quote "$1" so names with spaces or globs don't word-split (SC2086)
  command -v "$1" >/dev/null 2>&1
}
failed() {
@@ -57,23 +57,41 @@ check_url() {
curl -L --silent -k --output /dev/null --fail "$1"
}
# check_clusters verifies that the given clusters are reachable via kubectl.
# Globals:   EXE - path to the k3d binary (must be set)
# Arguments: one or more cluster names
# Outputs:   diagnostics to stdout; `docker ps -a` dump on failure
# Returns:   0 if every cluster is reachable, 1 on the first failure
check_clusters() {
  [ -n "$EXE" ] || abort "EXE is not defined"
  for c in "$@" ; do
    # switch the active kubeconfig context to cluster $c
    $EXE get kubeconfig "$c" --switch
    if kubectl cluster-info ; then
      passed "cluster $c is reachable"
    else
      warn "could not obtain cluster info for $c. Kubeconfig:\n$(kubectl config view)"
      docker ps -a
      return 1
    fi
  done
  return 0
}
# check_multi_node verifies that a cluster runs with an expected number of nodes
# Globals:   EXE - path to the k3d binary
# Arguments: $1 - cluster name
#            $2 - expected number of nodes
# Outputs:   node list and `docker ps -a` dump on mismatch
# Returns:   0 if the node count matches, 1 otherwise
check_multi_node() {
  cluster=$1
  expectedNodeCount=$2
  $EXE get kubeconfig "$cluster" --switch
  nodeCount=$(kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers | wc -l)
  # numeric -eq (not ==): == treats the unquoted RHS as a glob pattern, and
  # BSD wc pads its output with leading spaces, which breaks string equality
  if [[ "$nodeCount" -eq "$expectedNodeCount" ]]; then
    passed "cluster $cluster has $expectedNodeCount nodes, as expected"
  else
    warn "cluster $cluster has incorrect number of nodes: $nodeCount != $expectedNodeCount"
    kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers
    docker ps -a
    return 1
  fi
  return 0
}
# check_registry verifies that the local registry answers the v2 catalog endpoint.
# Globals: REGISTRY - host:port of the registry under test
# Returns: check_url's status (0 if the URL is reachable)
check_registry() {
  check_url "$REGISTRY/v2/_catalog"
}
check_volume_exists() {

@@ -15,6 +15,7 @@ k3de2e=$(docker run -d \
--privileged \
-e EXE="$K3D_EXE" \
-e CI="true" \
-e LOG_LEVEL="$LOG_LEVEL" \
--name "k3d-e2e-runner-$TIMESTAMP" \
k3d:$K3D_IMAGE_TAG)

@@ -7,15 +7,15 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"

info "Creating two clusters..."
$EXE create cluster c1 --wait --timeout 60s --api-port 6443 || failed "could not create cluster c1"
$EXE create cluster c2 --wait --timeout 60s --api-port 6444 || failed "could not create cluster c2"

info "Checking we have access to both clusters..."
check_clusters "c1" "c2" || failed "error checking cluster"

info "Deleting clusters..."
$EXE delete cluster c1 || failed "could not delete the cluster c1"
$EXE delete cluster c2 || failed "could not delete the cluster c2"

exit 0

@@ -0,0 +1,51 @@
#!/bin/bash

# test_full_lifecycle.sh: e2e test covering the full cluster lifecycle
# create -> check access/nodes -> stop -> start -> re-check -> delete
# Requires: EXE (k3d binary), helpers from common.sh, kubectl, docker.

CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; }

# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"

clustername="lifecycletest"

info "Creating cluster $clustername..."
$EXE create cluster "$clustername" --workers 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5

# 1. check initial access to the cluster
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"

info "Checking that we have 2 nodes online..."
check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"

# 2. stop the cluster (check the exit code: a failed stop would make the
# following "no access" assertion pass for the wrong reason)
info "Stopping cluster..."
$EXE stop cluster "$clustername" || failed "could not stop cluster $clustername"

info "Checking that cluster was stopped"
check_clusters "$clustername" && failed "cluster was not stopped, since we still have access"

# 3. start the cluster again
info "Starting cluster..."
$EXE start cluster "$clustername" || failed "could not start cluster $clustername"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5

info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"

info "Checking that we have 2 nodes online..."
check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"

# Cleanup
info "Deleting cluster $clustername..."
$EXE delete cluster "$clustername" || failed "could not delete the cluster $clustername"

exit 0

@@ -7,30 +7,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"

info "Creating cluster multimaster..."
$EXE create cluster "multimaster" --masters 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multimaster"

info "Checking that we have access to the cluster..."
check_clusters "multimaster" || failed "error checking cluster"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5

# use the shared helper from common.sh instead of a local node-count loop
info "Checking that we have 3 master nodes online..."
check_multi_node "multimaster" 3 || failed "failed to verify number of nodes"

info "Deleting cluster multimaster..."
$EXE delete cluster "multimaster" || failed "could not delete the cluster multimaster"

Loading…
Cancel
Save