Merge pull request #1 from rancher/main

Merge base repo
Ivan Vandot committed via GitHub, 4 years ago
commit 5da94fef92
100 changed files (lines changed in parentheses; BIN = binary):

  1. .dockerignore (7)
  2. .drone.yml (455)
  3. .github/ISSUE_TEMPLATE/bug_report.md (4)
  4. .github/ISSUE_TEMPLATE/feature_request.md (5)
  5. .gitignore (6)
  6. .travis.yml (47)
  7. Dockerfile (6)
  8. Makefile (145)
  9. README.md (35)
  10. cmd/cluster/cluster.go (23)
  11. cmd/cluster/clusterCreate.go (160)
  12. cmd/cluster/clusterDelete.go (59)
  13. cmd/cluster/clusterList.go (129)
  14. cmd/cluster/clusterStart.go (43)
  15. cmd/cluster/clusterStop.go (36)
  16. cmd/get/getCluster.go (128)
  17. cmd/image/image.go (14)
  18. cmd/image/imageImport.go (60)
  19. cmd/kubeconfig/kubeconfig.go (16)
  20. cmd/kubeconfig/kubeconfigGet.go (53)
  21. cmd/kubeconfig/kubeconfigMerge.go (140)
  22. cmd/node/node.go (22)
  23. cmd/node/nodeCreate.go (44)
  24. cmd/node/nodeDelete.go (31)
  25. cmd/node/nodeList.go (51)
  26. cmd/node/nodeStart.go (23)
  27. cmd/node/nodeStop.go (23)
  28. cmd/root.go (41)
  29. cmd/util/completion.go (97)
  30. cmd/util/filter.go (32)
  31. cmd/util/ports.go (35)
  32. cmd/util/volumes.go (38)
  33. docs/faq/faq.md (14)
  34. docs/faq/v1vsv3-comparison.md (16)
  35. docs/index.md (23)
  36. docs/internals/defaults.md (20)
  37. docs/internals/networking.md (6)
  38. docs/static/asciicast/20200715_k3d.01.cast (215)
  39. docs/static/css/extra.css (33)
  40. docs/static/img/favicons_black_blue/android-chrome-192x192.png (BIN)
  41. docs/static/img/favicons_black_blue/android-chrome-512x512.png (BIN)
  42. docs/static/img/favicons_black_blue/apple-touch-icon.png (BIN)
  43. docs/static/img/favicons_black_blue/favicon-16x16.png (BIN)
  44. docs/static/img/favicons_black_blue/favicon-32x32.png (BIN)
  45. docs/static/img/favicons_black_blue/favicon.ico (BIN)
  46. docs/static/img/favicons_black_blue/favicon.png (BIN)
  47. docs/static/img/favicons_black_green/android-chrome-192x192.png (BIN)
  48. docs/static/img/favicons_black_green/android-chrome-512x512.png (BIN)
  49. docs/static/img/favicons_black_green/apple-touch-icon.png (BIN)
  50. docs/static/img/favicons_black_green/favicon-16x16.png (BIN)
  51. docs/static/img/favicons_black_green/favicon-32x32.png (BIN)
  52. docs/static/img/favicons_black_green/favicon.ico (BIN)
  53. docs/static/img/k3d-black.svg (91)
  54. docs/usage/.pages (2)
  55. docs/usage/commands.md (75)
  56. docs/usage/guides/exposing_services.md (12)
  57. docs/usage/guides/registries.md (184)
  58. docs/usage/kubeconfig.md (34)
  59. docs/usage/multimaster.md (25)
  60. docs/usage/multiserver.md (25)
  61. go.mod (35)
  62. go.sum (129)
  63. install.sh (2)
  64. main.go (2)
  65. mkdocs.yml (3)
  66. pkg/cluster/cluster.go (290)
  67. pkg/cluster/clusterName.go (2)
  68. pkg/cluster/kubeconfig.go (109)
  69. pkg/cluster/loadbalancer.go (39)
  70. pkg/cluster/node.go (193)
  71. pkg/runtimes/containerd/image.go (33)
  72. pkg/runtimes/containerd/kubeconfig.go (5)
  73. pkg/runtimes/containerd/network.go (6)
  74. pkg/runtimes/containerd/node.go (26)
  75. pkg/runtimes/containerd/util.go (11)
  76. pkg/runtimes/containerd/volume.go (11)
  77. pkg/runtimes/docker/container.go (23)
  78. pkg/runtimes/docker/image.go (54)
  79. pkg/runtimes/docker/kubeconfig.go (9)
  80. pkg/runtimes/docker/network.go (11)
  81. pkg/runtimes/docker/node.go (70)
  82. pkg/runtimes/docker/translate.go (44)
  83. pkg/runtimes/docker/translate_test.go (8)
  84. pkg/runtimes/docker/util.go (55)
  85. pkg/runtimes/docker/volume.go (36)
  86. pkg/runtimes/runtime.go (44)
  87. pkg/tools/tools.go (173)
  88. pkg/types/types.go (135)
  89. pkg/util/files.go (2)
  90. pkg/util/randomString.go (2)
  91. proxy/Dockerfile (10)
  92. proxy/templates/nginx.tmpl (6)
  93. tests/common.sh (21)
  94. tests/dind.sh (32)
  95. tests/runner.sh (2)
  96. tests/test_basic.sh (15)
  97. tests/test_full_lifecycle.sh (20)
  98. tests/test_multi_master.sh (14)
  99. thoughts.md (24)
  100. tools/Dockerfile (13)

Some files were not shown because too many files have changed in this diff.

.dockerignore
@@ -0,0 +1,7 @@
.github/
.local/
bin/
_dist/
tools/
proxy/
site/

.drone.yml
@@ -0,0 +1,455 @@
---
###########################################
##### k3d CLI/binary release pipeline #####
###########################################
kind: pipeline
type: docker
name: main
platform:
os: linux
arch: amd64
steps:
- name: lint
image: golang:1.14
commands:
- make ci-setup
- make check-fmt lint
when:
event:
- push
- pull_request
- tag
- name: test
image: docker:19.03
volumes:
- name: dockersock
path: /var/run
commands:
- apk add git bash curl sudo jq make
- sleep 5 # give docker enough time to start
- make e2e
when:
event:
- push
- pull_request
- tag
- name: build
image: golang:1.14
environment:
GIT_TAG: "${DRONE_TAG}"
commands:
- make ci-setup
- make build-cross
depends_on:
- lint
- test
when:
branch:
- main
event:
- push
- tag
- name: pre-release
image: plugins/github-release
settings:
api_key:
from_secret: github_token
files:
- _dist/*
checksum:
- sha256
prerelease: true
depends_on:
- lint
- test
- build
when:
event:
- tag
ref:
include:
- "refs/tags/*rc*"
- "refs/tags/*beta*"
- "refs/tags/*alpha*"
- name: release
image: plugins/github-release
settings:
api_key:
from_secret: github_token
files:
- _dist/*
checksum:
- sha256
depends_on:
- lint
- test
- build
when:
event:
- tag
ref:
exclude:
- "refs/tags/*rc*"
- "refs/tags/*beta*"
- "refs/tags/*alpha*"
services:
# Starting the docker service to be used by dind
- name: docker
image: docker:19.03-dind
privileged: true
volumes:
- name: dockersock
path: /var/run
volumes:
- name: dockersock
temp: {}
---
#########################
##### Documentation #####
#########################
kind: pipeline
type: docker
name: docs
platform:
os: linux
arch: amd64
steps:
- name: build
image: python:3.8
commands:
- python3 -m pip install -r docs/requirements.txt
- mkdocs build --verbose --clean --strict
when:
branch:
- main
event:
- push
- name: publish
image: plugins/gh-pages
settings:
password:
from_secret: github_token
username: rancherio-gh-m
pages_directory: site/
target_branch: gh-pages
when:
branch:
- main
event:
- push
trigger:
event:
- push
branch:
- main
---
#####################
##### k3d-proxy #####
#####################
kind: pipeline
type: docker
name: proxy_linux_amd64
platform:
os: linux
arch: amd64
steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-proxy
tags:
- latest-linux-amd64
- "${DRONE_TAG}-linux-amd64"
dockerfile: proxy/Dockerfile
context: proxy/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
---
kind: pipeline
type: docker
name: proxy_linux_arm
platform:
os: linux
arch: arm
steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-proxy
tags:
- latest-linux-arm
- "${DRONE_TAG}-linux-arm"
dockerfile: proxy/Dockerfile
context: proxy/
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- ARCH=arm
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
---
kind: pipeline
type: docker
name: proxy_linux_arm64
platform:
os: linux
arch: arm64
steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-proxy
tags:
- latest-linux-arm64
- "${DRONE_TAG}-linux-arm64"
dockerfile: proxy/Dockerfile
context: proxy/
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- ARCH=arm64
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
---
kind: pipeline
type: docker
name: proxy_manifest
platform:
os: linux
arch: amd64
steps:
- name: push_manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "rancher/k3d-proxy:${DRONE_TAG}"
template: "rancher/k3d-proxy:${DRONE_TAG}-OS-ARCH"
platforms:
- linux/amd64
- linux/arm
- linux/arm64
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
- proxy_linux_amd64
- proxy_linux_arm
- proxy_linux_arm64
---
#####################
##### k3d-tools #####
#####################
kind: pipeline
type: docker
name: tools_linux_amd64
platform:
os: linux
arch: amd64
steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-tools
tags:
- latest-linux-amd64
- "${DRONE_TAG}-linux-amd64"
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
---
kind: pipeline
type: docker
name: tools_linux_arm
platform:
os: linux
arch: arm
steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-tools
tags:
- latest-linux-arm
- "${DRONE_TAG}-linux-arm"
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
---
kind: pipeline
type: docker
name: tools_linux_arm64
platform:
os: linux
arch: arm64
steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-tools
tags:
- latest-linux-arm64
- "${DRONE_TAG}-linux-arm64"
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
---
kind: pipeline
type: docker
name: tools_manifest
platform:
os: linux
arch: amd64
steps:
- name: push_manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "rancher/k3d-tools:${DRONE_TAG}"
template: "rancher/k3d-tools:${DRONE_TAG}-OS-ARCH"
platforms:
- linux/amd64
- linux/arm
- linux/arm64
when:
event:
- tag
trigger:
event:
- tag
depends_on:
- main
- tools_linux_amd64
- tools_linux_arm
- tools_linux_arm64

.github/ISSUE_TEMPLATE/bug_report.md
@@ -10,7 +10,7 @@ assignees: ''
**What did you do?**
- How was the cluster created?
- `k3d create -x A -y B`
- `k3d cluster create -x A -y B`
- What did you do afterwards?
- k3d commands?
@@ -31,7 +31,7 @@ If applicable, add screenshots or terminal output (code block) to help explain y
**Which version of `k3d`?**
- output of `k3d --version`
- output of `k3d version`
**Which version of docker?**

.github/ISSUE_TEMPLATE/feature_request.md
@@ -15,8 +15,9 @@ Please link to the issue/PR here and explain how your request is related to it.
Do you need...
- a new command (next to e.g. `create`, `delete`, etc. used via `k3d <your-command>`)?
- a new flag for a command (e.g. `k3d create --<your-flag>`)?
- a new noun (next to e.g. `cluster`, `node`, etc. used via `k3d <noun>`)?
- a new verb (next to e.g. `cluster create`, `node start`, etc. used via `k3d <noun> <verb>`)
- a new flag for a command (e.g. `k3d cluster create --<your-flag>`)?
- which command?
- different functionality for an existing command/flag
- which command or flag?

.gitignore (vendored)

@@ -6,6 +6,8 @@
*.dylib
# Output folders
tools/bin/
tools/_dist/
bin/
_dist/
site/
@@ -18,4 +20,6 @@ site/
# Editors
.vscode/
.local/
.local/
.idea/
*.iml

.travis.yml (deleted)
@@ -1,47 +0,0 @@
matrix:
include:
- dist: bionic
language: go
env:
- GO111MODULE=on
go:
- 1.14.x
git:
depth: 1
install: true
before_script:
- make ci-setup
script:
- make -e "E2E_SKIP=test_multi_master" ci-tests build-cross
deploy:
provider: releases
skip_cleanup: true
api_key:
secure: VFb0jmL6t+cuKWAGC5OLeseTJrK3CakD5Laeyp0JVG/o3/cYgHP0lIeDmJJMZL/Luxm5aL8QHsCRfQoMBAnj6Q8hchI9rbcYhFZzuFJYyDxjcrPm0+kM3yiK14lQZNvksy2ZSsO7D63c8x9sQVrEus4idVTUoxrpSXLM2eVjl6W0O2RdZvLsxgaLPwV1ufpihrqbXdEUjt/YSYpHiC5gS3o+FcyMGucJQdN/L7p6jyAqVgg4+t8bdyWj6+MEG4p8lmWhhbGzDo38iMxtCBu+nDHRsbivay3eJZ643VguX0lj62Vt5KUTcVJntmZqQ2UF6FoEVUPOegkrSeoiMuOH1+nYwcsfMFijMkrcFhb6bAisJJd6agdhFWXiSwL88FQkJh0DqeA0tFFIzDbTS/AZTY4Li8bWng3aCBgSXiMzIBf0es+wMDw0gwhfH44Y/RAsKSQJ/Lln00AaVzkOkOWOmu5Ks0CVYDy0M5QDQOCW2E9TIb7WdIMh3aNCkZi+rGovigejJv3vUZqkN03Og07Hbrjgfg28iY3isIt3soOrVqek2hJJFnKjUhhv2OhJm3z6FpTyMViUtSmJ+LTiBjpyiWC4QuaITDadCJTxZQwobhI+18c2Zi5/HjTX1pgD1wk3quv9R4bGjVINenefG6xxaNj+CeFTfrQnnHuXOL50828=
file:
- _dist/k3d-darwin-amd64
- _dist/k3d-linux-386
- _dist/k3d-linux-amd64
- _dist/k3d-linux-arm
- _dist/k3d-linux-arm64
- _dist/k3d-windows-amd64.exe
on:
repo: rancher/k3d
tags: true
- dist: bionic
language: python
python: 3.8
before_script:
- python3 -m pip install -r docs/requirements.txt
script:
- mkdocs build --verbose --clean --strict
deploy:
provider: pages
skip_cleanup: true
github_token:
secure: VFb0jmL6t+cuKWAGC5OLeseTJrK3CakD5Laeyp0JVG/o3/cYgHP0lIeDmJJMZL/Luxm5aL8QHsCRfQoMBAnj6Q8hchI9rbcYhFZzuFJYyDxjcrPm0+kM3yiK14lQZNvksy2ZSsO7D63c8x9sQVrEus4idVTUoxrpSXLM2eVjl6W0O2RdZvLsxgaLPwV1ufpihrqbXdEUjt/YSYpHiC5gS3o+FcyMGucJQdN/L7p6jyAqVgg4+t8bdyWj6+MEG4p8lmWhhbGzDo38iMxtCBu+nDHRsbivay3eJZ643VguX0lj62Vt5KUTcVJntmZqQ2UF6FoEVUPOegkrSeoiMuOH1+nYwcsfMFijMkrcFhb6bAisJJd6agdhFWXiSwL88FQkJh0DqeA0tFFIzDbTS/AZTY4Li8bWng3aCBgSXiMzIBf0es+wMDw0gwhfH44Y/RAsKSQJ/Lln00AaVzkOkOWOmu5Ks0CVYDy0M5QDQOCW2E9TIb7WdIMh3aNCkZi+rGovigejJv3vUZqkN03Og07Hbrjgfg28iY3isIt3soOrVqek2hJJFnKjUhhv2OhJm3z6FpTyMViUtSmJ+LTiBjpyiWC4QuaITDadCJTxZQwobhI+18c2Zi5/HjTX1pgD1wk3quv9R4bGjVINenefG6xxaNj+CeFTfrQnnHuXOL50828=
local_dir: site
on:
repo: rancher/k3d
branch: master

Dockerfile
@@ -3,10 +3,8 @@ WORKDIR /app
COPY . .
RUN make build && bin/k3d version
FROM docker:19.03-dind
# TODO: we could create a different stage for e2e tests
RUN apk add bash curl sudo
FROM docker:19.03-dind as dind
RUN apk add bash curl sudo jq git make
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \
chmod +x ./kubectl && \
mv ./kubectl /usr/local/bin/kubectl

Makefile
@@ -1,33 +1,53 @@
###################################
# #
# CONFIGURATION #
# #
###################################
########## Shell/Terminal Settings ##########
SHELL := /bin/bash
# Build targets
TARGETS ?= darwin/amd64 linux/amd64 linux/386 linux/arm linux/arm64 windows/amd64
TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256
# determine if make is being executed from interactive terminal
INTERACTIVE:=$(shell [ -t 0 ] && echo 1)
# Use Go Modules for everything
export GO111MODULE=on
########## Tags ##########
# get git tag
GIT_TAG := $(shell git describe --tags)
GIT_TAG ?= $(shell git describe --tags)
ifeq ($(GIT_TAG),)
GIT_TAG := $(shell git describe --always)
endif
# get latest k3s version: grep the tag JSON field, extract the tag and replace + with - (difference between git and dockerhub tags)
ifneq (${GITHUB_API_TOKEN},)
K3S_TAG := $(shell curl --silent -H "Authorization: token: ${GITHUB_API_TOKEN}" "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
else
K3S_TAG := $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
endif
# Docker image tag derived from Git tag
K3D_IMAGE_TAG := $(GIT_TAG)
# get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
ifeq ($(K3S_TAG),)
$(warning K3S_TAG undefined: couldn't get latest k3s image tag!)
$(warning Output of curl: $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest"))
$(warning Output of curl: $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable"))
$(error exiting)
endif
# determine if make is being executed from interactive terminal
INTERACTIVE:=$(shell [ -t 0 ] && echo 1)
########## Source Options ##########
# DIRS defines a single level directly, we only look at *.go in this directory.
# REC_DIRS defines a source code tree. All go files are analyzed recursively.
DIRS := .
REC_DIRS := cmd
# E2E test settings
########## Test Settings ##########
E2E_LOG_LEVEL ?= WARN
E2E_SKIP ?=
E2E_RUNNER_START_TIMEOUT ?= 10
########## Go Build Options ##########
# Build targets
TARGETS ?= darwin/amd64 linux/amd64 linux/386 linux/arm linux/arm64 windows/amd64
TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256
# Go options
GO ?= go
@@ -35,52 +55,72 @@ PKG := $(shell go mod vendor)
TAGS :=
TESTS := .
TESTFLAGS :=
LDFLAGS := -w -s -X github.com/rancher/k3d/version.Version=${GIT_TAG} -X github.com/rancher/k3d/version.K3sVersion=${K3S_TAG}
LDFLAGS := -w -s -X github.com/rancher/k3d/v3/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v3/version.K3sVersion=${K3S_TAG}
GCFLAGS :=
GOFLAGS :=
BINDIR := $(CURDIR)/bin
BINARIES := k3d
K3D_IMAGE_TAG := $(GIT_TAG)
# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
GO_SRC := $(foreach dir,$(DIRS),$(wildcard $(dir)/*.go))
GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))
########## Required Tools ##########
# Go Package required
PKG_GOX := github.com/mitchellh/gox@v1.0.1
PKG_GOLANGCI_LINT_VERSION := 1.25.0
PKG_GOLANGCI_LINT_VERSION := 1.28.3
PKG_GOLANGCI_LINT_SCRIPT := https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint@v${PKG_GOLANGCI_LINT_VERSION}
########## Linting Options ##########
# configuration adjustments for golangci-lint
GOLANGCI_LINT_DISABLED_LINTERS := "" # disabling typecheck, because it currently (06.09.2019) fails with Go 1.13
# Use Go Modules for everything
export GO111MODULE=on
# go source directories.
# DIRS defines a single level directly, we only look at *.go in this directory.
# REC_DIRS defines a source code tree. All go files are analyzed recursively.
DIRS := .
REC_DIRS := cmd
# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
GO_SRC := $(foreach dir,$(DIRS),$(wildcard $(dir)/*.go))
GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))
# Rules for directory list as input for the golangci-lint program
LINT_DIRS := $(DIRS) $(foreach dir,$(REC_DIRS),$(dir)/...)
#############################
# #
# TARGETS #
# #
#############################
.PHONY: all build build-cross clean fmt check-fmt lint check extra-clean install-tools
all: clean fmt check build
############################
########## Builds ##########
############################
# debug builds
build-debug: GCFLAGS+="all=-N -l"
build-debug: build
# default build target for the local platform
build:
CGO_ENABLED=0 $(GO) build $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)/$(BINARIES)'
CGO_ENABLED=0 $(GO) build $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -gcflags '$(GCFLAGS)' -o '$(BINDIR)/$(BINARIES)'
# cross-compilation for all targets
build-cross: LDFLAGS += -extldflags "-static"
build-cross:
CGO_ENABLED=0 gox -parallel=3 -output="_dist/$(BINARIES)-{{.OS}}-{{.Arch}}" -osarch='$(TARGETS)' $(GOFLAGS) $(if $(TAGS),-tags '$(TAGS)',) -ldflags '$(LDFLAGS)'
build-docker: Dockerfile
@echo "Building Docker image k3d:$(K3D_IMAGE_TAG)"
docker build -t k3d:$(K3D_IMAGE_TAG) .
# build a specific docker target ( '%' matches the target as specified in the Dockerfile)
build-docker-%:
@echo "Building Docker image k3d:$(K3D_IMAGE_TAG)-$*"
docker build . -t k3d:$(K3D_IMAGE_TAG)-$* --target $*
# build helper images
build-helper-images:
@echo "Building docker image rancher/k3d-proxy:$(GIT_TAG)"
docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(GIT_TAG)
@echo "Building docker image rancher/k3d-tools:$(GIT_TAG)"
docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(GIT_TAG) --build-arg GIT_TAG=$(GIT_TAG)
##############################
########## Cleaning ##########
##############################
clean:
@rm -rf $(BINDIR) _dist/
@@ -89,13 +129,14 @@ extra-clean: clean
$(GO) clean -i $(PKG_GOX)
$(GO) clean -i $(PKG_GOLANGCI_LINT)
##########################################
########## Formatting & Linting ##########
##########################################
# fmt will fix the golang source style in place.
fmt:
@gofmt -s -l -w $(GO_SRC)
e2e: build-docker
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_SKIP="$(E2E_SKIP)" tests/dind.sh "${K3D_IMAGE_TAG}"
# check-fmt returns an error code if any source code contains format error.
check-fmt:
@test -z $(shell gofmt -s -l $(GO_SRC) | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'"
@@ -105,6 +146,28 @@ lint:
check: check-fmt lint
###########################
########## Tests ##########
###########################
e2e: build-docker-dind
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_SKIP="$(E2E_SKIP)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) tests/dind.sh "${K3D_IMAGE_TAG}-dind"
ci-tests: fmt check e2e
##########################
########## Misc ##########
##########################
drone:
@echo "Running drone pipeline locally with branch=main and event=push"
drone exec --trusted --branch main --event push
#########################################
########## Setup & Preparation ##########
#########################################
# Check for required executables
HAS_GOX := $(shell command -v gox 2> /dev/null)
HAS_GOLANGCI := $(shell command -v golangci-lint)
@@ -128,6 +191,10 @@ endif
endif
endif
# In the CI system, we need...
# - golangci-lint for linting (lint)
# - gox for cross-compilation (build-cross)
# - kubectl for E2E-tests (e2e)
ci-setup:
@echo "Installing Go tools..."
curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v$(PKG_GOLANGCI_LINT_VERSION)
@@ -136,6 +203,4 @@ ci-setup:
@echo "Installing kubectl..."
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
ci-tests: fmt check e2e
mv ./kubectl /usr/local/bin/kubectl
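
The LDFLAGS above inject versions with -X github.com/rancher/k3d/v3/version.Version=${GIT_TAG}, which only works because the version package exposes plain string variables. A rough sketch of what such a package looks like (the variable defaults are assumptions, and GetK3sVersion(bool) is sketched from its call sites in clusterCreate.go, not from the real file):

    package version

    // Version is overridden at build time via the Makefile's LDFLAGS:
    //   -X github.com/rancher/k3d/v3/version.Version=${GIT_TAG}
    var Version = "dev" // hypothetical fallback when built without ldflags

    // K3sVersion is injected the same way with the latest k3s tag.
    var K3sVersion = "v1.18.6-k3s1" // hypothetical pinned fallback

    // GetK3sVersion returns the k3s version to run in the node containers;
    // with latest=true, callers ask for the moving "latest" tag instead of
    // the pinned build-time default.
    func GetK3sVersion(latest bool) string {
    	if latest {
    		return "latest"
    	}
    	return K3sVersion
    }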

README.md
@@ -1,11 +1,14 @@
# [![k3d](docs/static/img/k3d_logo_black_blue.svg)](https://k3d.io/)
[![Build Status](https://travis-ci.com/rancher/k3d.svg?branch=master)](https://travis-ci.com/rancher/k3d)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3d)](https://goreportcard.com/report/github.com/rancher/k3d)
[![License](https://img.shields.io/github/license/rancher/k3d)](./LICENSE.md)
![Downloads](https://img.shields.io/github/downloads/rancher/k3d/total.svg)
[![Build Status](https://img.shields.io/drone/build/rancher/k3d/main?logo=drone&server=https%3A%2F%2Fdrone-publish.rancher.io&style=flat-square)](https://drone-publish.rancher.io/rancher/k3d)
[![License](https://img.shields.io/github/license/rancher/k3d?style=flat-square)](./LICENSE.md)
![Downloads](https://img.shields.io/github/downloads/rancher/k3d/total.svg?style=flat-square)
**Please Note:** `master` is now v3.0.0 and the code for v1.x can be found in the `master-v1` branch!
[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv3-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/rancher/k3d/v3)
[![Go version](https://img.shields.io/github/go-mod/go-version/rancher/k3d?logo=go&logoColor=white&style=flat-square)](./go.mod)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3d?style=flat-square)](https://goreportcard.com/report/github.com/rancher/k3d)
**Please Note:** `main` is now v3.0.0 and the code for v1.x can be found in the `main-v1` branch!
## [k3s in docker](https://k3d.io)
@@ -13,7 +16,7 @@ k3s is the lightweight Kubernetes distribution by Rancher: [rancher/k3s](https:/
k3d creates containerized k3s clusters. This means, that you can spin up a multi-node k3s cluster on a single machine using docker.
[![asciicast](https://asciinema.org/a/330413.svg)](https://asciinema.org/a/330413)
[![asciicast](https://asciinema.org/a/347570.svg)](https://asciinema.org/a/347570)
## Learning
@@ -40,11 +43,11 @@ k3d creates containerized k3s clusters. This means, that you can spin up a multi
You have several options there:
- use the install script to grab the latest release:
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/master/install.sh | bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/master/install.sh | bash`
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- use the install script to grab a specific release (via `TAG` environment variable):
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/master/install.sh | TAG=v3.0.0-beta.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/master/install.sh | TAG=v3.0.0-beta.0 bash`
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is available for MacOS and Linux)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
@@ -56,7 +59,7 @@ or...
## Build
1. Clone this repo, e.g. via `go get -u github.com/rancher/k3d`
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v3@main`
2. Inside the repo run
- 'make install-tools' to make sure required go packages are installed
3. Inside the repo run one of the following commands
@@ -70,10 +73,10 @@ Check out what you can do via `k3d help` or check the docs @ [k3d.io](https://k3
Example Workflow: Create a new cluster and use it with `kubectl`
1. `k3d create cluster CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s)
2. `k3d get kubeconfig CLUSTER_NAME --switch` to update your default kubeconfig and switch the current-context to the new one
1. `k3d cluster create CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s + 1 loadbalancer container)
2. `k3d kubeconfig merge CLUSTER_NAME --switch-context` to update your default kubeconfig and switch the current-context to the new one
3. execute some commands like `kubectl get pods --all-namespaces`
4. `k3d delete cluster CLUSTER_NAME` to delete the default cluster
4. `k3d cluster delete CLUSTER_NAME` to delete the default cluster
## Connect
@@ -84,3 +87,7 @@ Example Workflow: Create a new cluster and use it with `kubectl`
## History
This repository is based on [@zeerorg](https://github.com/zeerorg/)'s [zeerorg/k3s-in-docker](https://github.com/zeerorg/k3s-in-docker), reimplemented in Go by [@iwilltry42](https://github.com/iwilltry42/) in [iwilltry42/k3d](https://github.com/iwilltry42/k3d), which got adopted by Rancher in [rancher/k3d](https://github.com/rancher/k3d).
## Related Projects
- [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.

cmd/cluster/cluster.go
@@ -19,22 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package get
package cluster
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdGet returns a new cobra command
func NewCmdGet() *cobra.Command {
// NewCmdCluster returns a new cobra command
func NewCmdCluster() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "get",
Short: "Get a resource.",
Long: `Get a resource.`,
Use: "cluster",
Short: "Manage cluster(s)",
Long: `Manage cluster(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
@@ -44,9 +43,13 @@ func NewCmdGet() *cobra.Command {
}
// add subcommands
cmd.AddCommand(NewCmdGetCluster())
cmd.AddCommand(NewCmdGetNode())
cmd.AddCommand(NewCmdGetKubeconfig())
cmd.AddCommand(NewCmdClusterCreate())
cmd.AddCommand(NewCmdClusterStart())
cmd.AddCommand(NewCmdClusterStop())
cmd.AddCommand(NewCmdClusterDelete())
cmd.AddCommand(NewCmdClusterList())
// add flags
// done
return cmd

cmd/cluster/clusterCreate.go
@@ -1,4 +1,4 @@
/*Package create ...
/*
Copyright © 2020 The k3d Author(s)
@@ -20,7 +20,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package create
package cluster
import (
"fmt"
@@ -30,54 +30,55 @@ import (
"github.com/spf13/cobra"
cliutil "github.com/rancher/k3d/cmd/util"
"github.com/rancher/k3d/pkg/cluster"
k3dCluster "github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/version"
cliutil "github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
k3dCluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
log "github.com/sirupsen/logrus"
)
const createClusterDescription = `
const clusterCreateDescription = `
Create a new k3s cluster with containerized nodes (k3s in docker).
Every cluster will consist of at least 2 containers:
- 1 master node container (k3s)
- 1 loadbalancer container as the entrypoint to the cluster (nginx)
Every cluster will consist of one or more containers:
- 1 (or more) server node container (k3s)
- (optionally) 1 loadbalancer container as the entrypoint to the cluster (nginx)
- (optionally) 1 (or more) agent node containers (k3s)
`
// NewCmdCreateCluster returns a new cobra command
func NewCmdCreateCluster() *cobra.Command {
// NewCmdClusterCreate returns a new cobra command
func NewCmdClusterCreate() *cobra.Command {
createClusterOpts := &k3d.CreateClusterOpts{}
var updateKubeconfig bool
createClusterOpts := &k3d.ClusterCreateOpts{}
var updateDefaultKubeconfig, updateCurrentContext bool
// create new command
cmd := &cobra.Command{
Use: "cluster NAME",
Short: "Create a new k3s cluster in docker",
Long: createClusterDescription,
Use: "create NAME",
Short: "Create a new cluster",
Long: clusterCreateDescription,
Args: cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
Run: func(cmd *cobra.Command, args []string) {
// parse args and flags
cluster := parseCreateClusterCmd(cmd, args, createClusterOpts)
// check if a cluster with that name exists already
if _, err := k3dCluster.GetCluster(cluster, runtimes.SelectedRuntime); err == nil {
if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", cluster.Name)
}
// create cluster
if updateKubeconfig {
log.Debugln("'--update-kubeconfig set: enabling wait-for-master")
cluster.CreateClusterOpts.WaitForMaster = true
if updateDefaultKubeconfig || updateCurrentContext {
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-server")
cluster.CreateClusterOpts.WaitForServer = true
}
if err := k3dCluster.CreateCluster(cmd.Context(), cluster, runtimes.SelectedRuntime); err != nil {
if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
// rollback if creation failed
log.Errorln(err)
log.Errorln("Failed to create cluster >>> Rolling Back")
if err := k3dCluster.DeleteCluster(cluster, runtimes.SelectedRuntime); err != nil {
if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
log.Errorln(err)
log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
}
@@ -85,53 +86,53 @@ func NewCmdCreateCluster() *cobra.Command {
}
log.Infof("Cluster '%s' created successfully!", cluster.Name)
if updateKubeconfig {
if updateDefaultKubeconfig || updateCurrentContext {
log.Debugf("Updating default kubeconfig with a new context for cluster %s", cluster.Name)
if _, err := k3dCluster.GetAndWriteKubeConfig(runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: false}); err != nil {
log.Fatalln(err)
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
log.Warningln(err)
}
}
// print information on how to use the cluster with kubectl
log.Infoln("You can now use it like this:")
if updateKubeconfig {
if updateDefaultKubeconfig && !updateCurrentContext {
fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name))
} else {
} else if !updateCurrentContext {
if runtime.GOOS == "windows" {
log.Debugf("GOOS is %s", runtime.GOOS)
fmt.Printf("$env:KUBECONFIG=(%s get kubeconfig %s)\n", os.Args[0], cluster.Name)
fmt.Printf("$env:KUBECONFIG=(%s kubeconfig get %s)\n", os.Args[0], cluster.Name)
} else {
fmt.Printf("export KUBECONFIG=$(%s get kubeconfig %s)\n", os.Args[0], cluster.Name)
fmt.Printf("export KUBECONFIG=$(%s kubeconfig get %s)\n", os.Args[0], cluster.Name)
}
fmt.Println("kubectl cluster-info")
}
fmt.Println("kubectl cluster-info")
},
}
/*********
* Flags *
*********/
cmd.Flags().StringP("api-port", "a", k3d.DefaultAPIPort, "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d create -m 3 -a 0.0.0.0:6550`")
cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
cmd.Flags().String("api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
cmd.Flags().String("network", "", "Join an existing network")
cmd.Flags().String("secret", "", "Specify a cluster secret. By default, we generate one.")
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@worker[0,1] -v /tmp/test:/tmp/other@master[0]`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@worker[0] -p 8081@worker[1]`")
cmd.Flags().BoolVar(&createClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
cmd.Flags().BoolVar(&createClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
cmd.Flags().BoolVar(&updateKubeconfig, "update-kubeconfig", false, "Directly update the default kubeconfig with the new cluster's context")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the master nodes")
cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (implies --update-default-kubeconfig)")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
/* Image Importing */
cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")
/* Multi Master Configuration */
/* Multi Server Configuration */
// multi-master - datastore
// TODO: implement multi-master setups with external data store
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi master clusters)")
// multi-server - datastore
// TODO: implement multi-server setups with external data store
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi server clusters)")
/*
cmd.Flags().String("datastore-network", "", "Specify container network where we can find the datastore-endpoint (add a connection)")
@@ -142,8 +143,8 @@ func NewCmdCreateCluster() *cobra.Command {
*/
/* k3s */
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on master nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on worker nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")
/* Subcommands */
@@ -152,7 +153,7 @@ func NewCmdCreateCluster() *cobra.Command {
}
// parseCreateClusterCmd parses the command input into variables required to create a cluster
func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.CreateClusterOpts) *k3d.Cluster {
func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.ClusterCreateOpts) *k3d.Cluster {
/********************************
* Parse and validate arguments *
@@ -180,14 +181,14 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
image = version.GetK3sVersion(true)
}
// --masters
masterCount, err := cmd.Flags().GetInt("masters")
// --servers
serverCount, err := cmd.Flags().GetInt("servers")
if err != nil {
log.Fatalln(err)
}
// --workers
workerCount, err := cmd.Flags().GetInt("workers")
// --agents
agentCount, err := cmd.Flags().GetInt("agents")
if err != nil {
log.Fatalln(err)
}
@@ -202,12 +203,12 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
network.Name = networkName
network.External = true
}
if networkName == "host" && (masterCount+workerCount) > 1 {
if networkName == "host" && (serverCount+agentCount) > 1 {
log.Fatalln("Can only run a single node in hostnetwork mode")
}
// --secret
secret, err := cmd.Flags().GetString("secret")
// --token
token, err := cmd.Flags().GetString("token")
if err != nil {
log.Fatalln(err)
}
@@ -234,6 +235,11 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
if exposeAPI.HostIP == "" {
exposeAPI.HostIP = k3d.DefaultAPIHost
}
if networkName == "host" {
// in hostNetwork mode, we're not going to map a hostport. Here it should always use 6443.
// Note that hostNetwork mode is super inflexible: since we don't change the backend port (on the container), only one host-network cluster is allowed.
exposeAPI.Port = k3d.DefaultAPIPort
}
// --volume
volumeFlags, err := cmd.Flags().GetStringArray("volume")
@@ -252,7 +258,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
// validate the specified volume mount and return it in SRC:DEST format
volume, err = cliutil.ValidateVolumeMount(volume)
volume, err = cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, volume)
if err != nil {
log.Fatalln(err)
}
@@ -309,7 +315,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
cluster := &k3d.Cluster{
Name: clustername,
Network: network,
Secret: secret,
Token: token,
CreateClusterOpts: createClusterOpts,
ExposeAPI: exposeAPI,
}
@@ -317,31 +323,31 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
// generate list of nodes
cluster.Nodes = []*k3d.Node{}
// MasterLoadBalancer
// ServerLoadBalancer
if !createClusterOpts.DisableLoadBalancer {
cluster.MasterLoadBalancer = &k3d.Node{
cluster.ServerLoadBalancer = &k3d.Node{
Role: k3d.LoadBalancerRole,
}
}
/****************
* Master Nodes *
* Server Nodes *
****************/
for i := 0; i < masterCount; i++ {
for i := 0; i < serverCount; i++ {
node := k3d.Node{
Role: k3d.MasterRole,
Role: k3d.ServerRole,
Image: image,
Args: createClusterOpts.K3sServerArgs,
MasterOpts: k3d.MasterOpts{},
ServerOpts: k3d.ServerOpts{},
}
// TODO: by default, we don't expose an API port: should we change that?
// -> if we want to change that, simply add the exposeAPI struct here
// first master node will be init node if we have more than one master specified but no external datastore
if i == 0 && masterCount > 1 {
node.MasterOpts.IsInit = true
// first server node will be init node if we have more than one server specified but no external datastore
if i == 0 && serverCount > 1 {
node.ServerOpts.IsInit = true
cluster.InitNode = &node
}
@@ -350,12 +356,12 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
/****************
* Worker Nodes *
* Agent Nodes *
****************/
for i := 0; i < workerCount; i++ {
for i := 0; i < agentCount; i++ {
node := k3d.Node{
Role: k3d.WorkerRole,
Role: k3d.AgentRole,
Image: image,
Args: createClusterOpts.K3sAgentArgs,
}
@@ -375,11 +381,17 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
// append ports
nodeCount := serverCount + agentCount
nodeList := cluster.Nodes
if !createClusterOpts.DisableLoadBalancer {
nodeCount++
nodeList = append(nodeList, cluster.ServerLoadBalancer)
}
for portmap, filters := range portFilterMap {
if len(filters) == 0 && (masterCount+workerCount) > 1 {
log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node.", portmap)
if len(filters) == 0 && (nodeCount) > 1 {
log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node (including the loadbalancer, if there is any).", portmap)
}
nodes, err := cliutil.FilterNodes(append(cluster.Nodes, cluster.MasterLoadBalancer), filters)
nodes, err := cliutil.FilterNodes(nodeList, filters)
if err != nil {
log.Fatalln(err)
}
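
The renamed package functions (ClusterGet, ClusterCreate, ClusterDelete) now take a context and a runtime as their leading arguments, so the create-with-rollback flow shown above can also be driven as a library. A minimal sketch under exactly those signatures (the bare Cluster value is hypothetical; a real cluster needs nodes, network and token populated the way parseCreateClusterCmd does):

    package main

    import (
    	"context"

    	k3dCluster "github.com/rancher/k3d/v3/pkg/cluster"
    	"github.com/rancher/k3d/v3/pkg/runtimes"
    	k3d "github.com/rancher/k3d/v3/pkg/types"
    	log "github.com/sirupsen/logrus"
    )

    func createWithRollback(ctx context.Context, c *k3d.Cluster) {
    	// fail early if a cluster with that name already exists
    	if _, err := k3dCluster.ClusterGet(ctx, runtimes.SelectedRuntime, c); err == nil {
    		log.Fatalf("Failed to create cluster '%s': it already exists", c.Name)
    	}
    	if err := k3dCluster.ClusterCreate(ctx, runtimes.SelectedRuntime, c); err != nil {
    		// roll back the partially created cluster, mirroring the CLI behavior
    		if derr := k3dCluster.ClusterDelete(ctx, runtimes.SelectedRuntime, c); derr != nil {
    			log.Errorln(derr)
    		}
    		log.Fatalln(err)
    	}
    }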

cmd/cluster/clusterDelete.go
@@ -19,43 +19,61 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package delete
package cluster
import (
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"fmt"
"os"
"path"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dutil "github.com/rancher/k3d/v3/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdDeleteCluster returns a new cobra command
func NewCmdDeleteCluster() *cobra.Command {
// NewCmdClusterDelete returns a new cobra command
func NewCmdClusterDelete() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "cluster (NAME | --all)",
Short: "Delete a cluster.",
Long: `Delete a cluster.`,
Args: cobra.MinimumNArgs(0), // 0 or n arguments; 0 only if --all is set
Use: "delete [NAME [NAME ...] | --all]",
Aliases: []string{"del", "rm"},
Short: "Delete cluster(s).",
Long: `Delete cluster(s).`,
Args: cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
ValidArgsFunction: util.ValidArgsAvailableClusters,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("delete cluster called")
clusters := parseDeleteClusterCmd(cmd, args)
if len(clusters) == 0 {
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := cluster.DeleteCluster(c, runtimes.SelectedRuntime); err != nil {
if err := cluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
log.Fatalln(err)
}
log.Infoln("Removing cluster details from default kubeconfig")
if err := cluster.RemoveClusterFromDefaultKubeConfig(c); err != nil {
log.Infoln("Removing cluster details from default kubeconfig...")
if err := cluster.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
log.Warnln("Failed to remove cluster details from default kubeconfig")
log.Warnln(err)
}
log.Infoln("Removing standalone kubeconfig file (if there is one)...")
configDir, err := k3dutil.GetConfigDirOrCreate()
if err != nil {
log.Warnf("Failed to delete kubeconfig file: %+v", err)
} else {
kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
if err := os.Remove(kubeconfigfile); err != nil {
if !os.IsNotExist(err) {
log.Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
}
}
}
log.Infof("Successfully deleted cluster %s!", c.Name)
}
@@ -82,19 +100,20 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.GetClusters(runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
return clusters
}
if len(args) < 1 {
log.Fatalln("Expecting at least one cluster name if `--all` is not set")
clusternames := []string{k3d.DefaultClusterName}
if len(args) != 0 {
clusternames = args
}
for _, name := range args {
cluster, err := cluster.GetCluster(&k3d.Cluster{Name: name}, runtimes.SelectedRuntime)
for _, name := range clusternames {
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
log.Fatalln(err)
}

cmd/cluster/clusterList.go (new file)
@@ -0,0 +1,129 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
import (
"context"
"fmt"
"os"
"strings"
"github.com/rancher/k3d/v3/cmd/util"
k3cluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
"github.com/liggitt/tabwriter"
)
// TODO: deal with an --all flag to differentiate between started and stopped clusters, like `docker ps` and `docker ps -a`
type clusterFlags struct {
noHeader bool
token bool
}
// NewCmdClusterList returns a new cobra command
func NewCmdClusterList() *cobra.Command {
clusterFlags := clusterFlags{}
// create new command
cmd := &cobra.Command{
Use: "list [NAME [NAME...]]",
Aliases: []string{"ls", "get"},
Short: "List cluster(s)",
Long: `List cluster(s).`,
Run: func(cmd *cobra.Command, args []string) {
clusters := buildClusterList(cmd.Context(), args)
PrintClusters(clusters, clusterFlags)
},
ValidArgsFunction: util.ValidArgsAvailableClusters,
}
// add flags
cmd.Flags().BoolVar(&clusterFlags.noHeader, "no-headers", false, "Disable headers")
cmd.Flags().BoolVar(&clusterFlags.token, "token", false, "Print k3s cluster token")
// add subcommands
// done
return cmd
}
func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {
var clusters []*k3d.Cluster
var err error
if len(args) == 0 {
// cluster name not specified : get all clusters
clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
// cluster name specified : get specific cluster
retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
}
return clusters
}
// PrintClusters : display the list of clusters
func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
defer tabwriter.Flush()
if !flags.noHeader {
headers := []string{"NAME", "SERVERS", "AGENTS", "LOADBALANCER"} // TODO: getCluster: add status column
if flags.token {
headers = append(headers, "TOKEN")
}
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
if err != nil {
log.Fatalln("Failed to print headers")
}
}
k3cluster.SortClusters(clusters)
for _, cluster := range clusters {
serverCount := cluster.ServerCount()
agentCount := cluster.AgentCount()
hasLB := cluster.HasLoadBalancer()
if flags.token {
fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%t\t%s\n", cluster.Name, serverCount, agentCount, hasLB, cluster.Token)
} else {
fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%t\n", cluster.Name, serverCount, agentCount, hasLB)
}
}
}
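
ClusterList, SortClusters and the per-cluster count helpers used above (ServerCount, AgentCount, HasLoadBalancer) are exported, so the same listing can be reproduced outside the command package. A short sketch under that assumption (PrintClusters itself takes the package-private clusterFlags type, so this formats its own output):

    package main

    import (
    	"context"
    	"fmt"

    	k3cluster "github.com/rancher/k3d/v3/pkg/cluster"
    	"github.com/rancher/k3d/v3/pkg/runtimes"
    	log "github.com/sirupsen/logrus"
    )

    func listClusters(ctx context.Context) {
    	clusters, err := k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
    	if err != nil {
    		log.Fatalln(err)
    	}
    	k3cluster.SortClusters(clusters) // sort by name, as the list command does
    	for _, c := range clusters {
    		fmt.Printf("%s: %d server(s), %d agent(s), loadbalancer=%t\n",
    			c.Name, c.ServerCount(), c.AgentCount(), c.HasLoadBalancer())
    	}
    }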

cmd/cluster/clusterStart.go
@@ -19,45 +19,51 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package start
package cluster
import (
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
"time"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdStartCluster returns a new cobra command
func NewCmdStartCluster() *cobra.Command {
// NewCmdClusterStart returns a new cobra command
func NewCmdClusterStart() *cobra.Command {
startClusterOpts := types.ClusterStartOpts{}
// create new command
cmd := &cobra.Command{
Use: "cluster (NAME [NAME...] | --all)",
Short: "Start an existing k3d cluster",
Long: `Start an existing k3d cluster`,
Use: "start [NAME [NAME...] | --all]",
Long: `Start existing k3d cluster(s)`,
Short: "Start existing k3d cluster(s)",
ValidArgsFunction: util.ValidArgsAvailableClusters,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("start cluster called")
clusters := parseStartClusterCmd(cmd, args)
if len(clusters) == 0 {
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := cluster.StartCluster(c, runtimes.SelectedRuntime); err != nil {
if err := cluster.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
log.Fatalln(err)
}
}
}
log.Debugln("...Finished")
},
}
// add flags
cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", false, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")
// add subcommands
@@ -73,19 +79,20 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.GetClusters(runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
return clusters
}
if len(args) < 1 {
log.Fatalln("Expecting at least one cluster name if `--all` is not set")
clusternames := []string{k3d.DefaultClusterName}
if len(args) != 0 {
clusternames = args
}
for _, name := range args {
cluster, err := cluster.GetCluster(&k3d.Cluster{Name: name}, runtimes.SelectedRuntime)
for _, name := range clusternames {
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
log.Fatalln(err)
}

cmd/cluster/clusterStop.go
@@ -19,40 +19,39 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package stop
package cluster
import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdStopCluster returns a new cobra command
func NewCmdStopCluster() *cobra.Command {
// NewCmdClusterStop returns a new cobra command
func NewCmdClusterStop() *cobra.Command {
// create new command
cmd := &cobra.Command{
Use: "cluster (NAME [NAME...] | --all)",
Short: "Stop an existing k3d cluster",
Long: `Stop an existing k3d cluster.`,
Use: "stop [NAME [NAME...] | --all]",
Short: "Stop existing k3d cluster(s)",
Long: `Stop existing k3d cluster(s).`,
ValidArgsFunction: util.ValidArgsAvailableClusters,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("stop cluster called")
clusters := parseStopClusterCmd(cmd, args)
if len(clusters) == 0 {
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := cluster.StopCluster(c, runtimes.SelectedRuntime); err != nil {
if err := cluster.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
log.Fatalln(err)
}
}
}
log.Debugln("...Finished")
},
}
@@ -73,19 +72,20 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.GetClusters(runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
return clusters
}
if len(args) < 1 {
log.Fatalln("Expecting at least one cluster name if `--all` is not set")
clusternames := []string{k3d.DefaultClusterName}
if len(args) != 0 {
clusternames = args
}
for _, name := range args {
cluster, err := cluster.GetCluster(&k3d.Cluster{Name: name}, runtimes.SelectedRuntime)
for _, name := range clusternames {
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
log.Fatalln(err)
}
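
ClusterStop mirrors ClusterStart, except that starting additionally takes a ClusterStartOpts carrying the --wait/--timeout settings wired up in clusterStart.go. A brief sketch of a programmatic stop-then-start under those signatures (restartCluster is a hypothetical helper, not part of this changeset):

    package main

    import (
    	"context"
    	"time"

    	"github.com/rancher/k3d/v3/pkg/cluster"
    	"github.com/rancher/k3d/v3/pkg/runtimes"
    	k3d "github.com/rancher/k3d/v3/pkg/types"
    	log "github.com/sirupsen/logrus"
    )

    func restartCluster(ctx context.Context, name string) {
    	c, err := cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
    	if err != nil {
    		log.Fatalln(err)
    	}
    	if err := cluster.ClusterStop(ctx, runtimes.SelectedRuntime, c); err != nil {
    		log.Fatalln(err)
    	}
    	// wait for the server(s) to come back, but give up after two minutes
    	opts := k3d.ClusterStartOpts{WaitForServer: true, Timeout: 2 * time.Minute}
    	if err := cluster.ClusterStart(ctx, runtimes.SelectedRuntime, c, opts); err != nil {
    		log.Fatalln(err)
    	}
    }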

cmd/get/getCluster.go (deleted)
@@ -1,128 +0,0 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package get
import (
"fmt"
"os"
"sort"
"strings"
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
"github.com/liggitt/tabwriter"
)
// NewCmdGetCluster returns a new cobra command
func NewCmdGetCluster() *cobra.Command {
// create new command
cmd := &cobra.Command{
Use: "cluster [NAME [NAME...]]",
Aliases: []string{"clusters"},
Short: "Get cluster",
Long: `Get cluster.`,
Args: cobra.MinimumNArgs(0), // 0 or more; 0 = all
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("get cluster called")
clusters, headersOff := parseGetClusterCmd(cmd, args)
var existingClusters []*k3d.Cluster
if clusters == nil { // Option a) no cluster name specified -> get all clusters
found, err := cluster.GetClusters(runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
existingClusters = append(existingClusters, found...)
} else { // Option b) cluster name specified -> get specific cluster
found, err := cluster.GetCluster(clusters, runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
existingClusters = append(existingClusters, found)
}
// print existing clusters
printClusters(existingClusters, headersOff)
},
}
// add flags
cmd.Flags().Bool("no-headers", false, "Disable headers")
// add subcommands
// done
return cmd
}
func parseGetClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, bool) {
// --no-headers
headersOff, err := cmd.Flags().GetBool("no-headers")
if err != nil {
log.Fatalln(err)
}
// Args = cluster name
if len(args) == 0 {
return nil, headersOff
}
cluster := &k3d.Cluster{Name: args[0]}
return cluster, headersOff
}
func printClusters(clusters []*k3d.Cluster, headersOff bool) {
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
defer tabwriter.Flush()
if !headersOff {
headers := []string{"NAME", "MASTERS", "WORKERS"} // TODO: getCluster: add status column
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
if err != nil {
log.Fatalln("Failed to print headers")
}
}
sort.Slice(clusters, func(i, j int) bool {
return clusters[i].Name < clusters[j].Name
})
for _, cluster := range clusters {
masterCount := 0
workerCount := 0
for _, node := range cluster.Nodes {
if node.Role == k3d.MasterRole {
masterCount++
} else if node.Role == k3d.WorkerRole {
workerCount++
}
}
fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, masterCount, workerCount)
}
}

@ -19,21 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package load
package image
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdLoad returns a new cobra command
func NewCmdLoad() *cobra.Command {
// NewCmdImage returns a new cobra command
func NewCmdImage() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "load",
Short: "Load a resource [image].",
Long: `Load a resource [image].`,
Use: "image",
Short: "Handle container images.",
Long: `Handle container images.`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
@ -43,7 +43,7 @@ func NewCmdLoad() *cobra.Command {
}
// add subcommands
cmd.AddCommand(NewCmdLoadImage())
cmd.AddCommand(NewCmdImageImport())
// add flags

@ -19,38 +19,42 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package load
package image
import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/pkg/runtimes"
"github.com/rancher/k3d/pkg/tools"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/tools"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdLoadImage returns a new cobra command
func NewCmdLoadImage() *cobra.Command {
// NewCmdImageImport returns a new cobra command
func NewCmdImageImport() *cobra.Command {
loadImageOpts := k3d.ImageImportOpts{}
// create new command
cmd := &cobra.Command{
Use: "image [IMAGE [IMAGE...]]",
Short: "Load an image from docker into a k3d cluster.",
Long: `Load an image from docker into a k3d cluster.`,
Args: cobra.MinimumNArgs(1),
Use: "import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]]",
Short: "Import image(s) from docker into k3d cluster(s).",
Long: `Import image(s) from docker into k3d cluster(s).`,
Aliases: []string{"images"},
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
images, clusters, keepTarball := parseLoadImageCmd(cmd, args)
log.Debugf("Load images [%+v] from runtime [%s] into clusters [%+v]", runtimes.SelectedRuntime, images, clusters)
images, clusters := parseLoadImageCmd(cmd, args)
log.Debugf("Load images [%+v] from runtime [%s] into clusters [%+v]", images, runtimes.SelectedRuntime, clusters)
for _, cluster := range clusters {
log.Debugf("Loading images into '%s'", cluster.Name)
if err := tools.LoadImagesIntoCluster(runtimes.SelectedRuntime, images, &cluster, keepTarball); err != nil {
log.Infof("Loading images into '%s'", cluster.Name)
if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
log.Errorf("Failed to load images into cluster '%s'", cluster.Name)
log.Errorln(err)
}
}
log.Debugln("Finished loading images into clusters")
log.Info("DONE")
},
}
@ -58,12 +62,12 @@ func NewCmdLoadImage() *cobra.Command {
* Flags *
*********/
cmd.Flags().StringArrayP("cluster", "c", []string{k3d.DefaultClusterName}, "Select clusters to load the image to.")
cmd.Flags().BoolP("keep-tarball", "k", false, "Do not delete the tarball which contains the saved images from the shared volume")
cmd.Flags().StringP("tar", "t", "", "Import image from local tarball")
if err := cmd.MarkFlagFilename("tar", ".tar"); err != nil {
log.Fatalln("Failed to mark --tar flag as filename")
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}
cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume")
/* Subcommands */
// done
@ -71,21 +75,7 @@ func NewCmdLoadImage() *cobra.Command {
}
// parseLoadImageCmd parses the command input into the images and target clusters required for the import
func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Cluster, bool) {
// --tar
localTarball, err := cmd.Flags().GetString("tar")
if err != nil {
log.Fatalln(err)
}
if cmd.Flags().Changed("tar") { // TODO: loadImage: implement import from local tarball
log.Fatalf("--tar flag not supported yet '%s'", localTarball)
}
// --keep-tarball
keepTarball, err := cmd.Flags().GetBool("keep-tarball")
if err != nil {
log.Fatalln(err)
}
func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Cluster) {
// --cluster
clusterNames, err := cmd.Flags().GetStringArray("cluster")
@ -103,5 +93,5 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
log.Fatalln("No images specified!")
}
return images, clusters, keepTarball
return images, clusters
}
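The renamed command in action; a minimal usage sketch (image and cluster names are placeholders, the flags are the ones registered above):

```bash
# Import a local docker image into the default cluster
k3d image import myapp:dev

# Import into a specific cluster and keep the intermediate tarball
k3d image import myapp:dev --cluster mycluster --keep-tarball
```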

@ -19,21 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package create
package kubeconfig
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdCreate returns a new cobra command
func NewCmdCreate() *cobra.Command {
// NewCmdKubeconfig returns a new cobra command
func NewCmdKubeconfig() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "create",
Short: "Create a resource [cluster, node].",
Long: `Create a resource [cluster, node].`,
Use: "kubeconfig",
Short: "Manage kubeconfig(s)",
Long: `Manage kubeconfig(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
@ -43,8 +43,8 @@ func NewCmdCreate() *cobra.Command {
}
// add subcommands
cmd.AddCommand(NewCmdCreateCluster())
cmd.AddCommand(NewCmdCreateNode())
cmd.AddCommand(NewCmdKubeconfigGet())
cmd.AddCommand(NewCmdKubeconfigMerge())
// add flags

@ -19,38 +19,42 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package get
package kubeconfig
import (
"fmt"
"os"
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
log "github.com/sirupsen/logrus"
)
type getKubeconfigFlags struct {
all bool
output string
all bool
}
// NewCmdGetKubeconfig returns a new cobra command
func NewCmdGetKubeconfig() *cobra.Command {
// NewCmdKubeconfigGet returns a new cobra command
func NewCmdKubeconfigGet() *cobra.Command {
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{}
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{
UpdateExisting: true,
UpdateCurrentContext: true,
OverwriteExisting: true,
}
getKubeconfigFlags := getKubeconfigFlags{}
// create new command
cmd := &cobra.Command{
Use: "kubeconfig [CLUSTER [CLUSTER [...]] | --all]", // TODO: getKubeconfig: allow more than one cluster name or even --all
Short: "Get kubeconfig",
Long: `Get kubeconfig.`,
Use: "get [CLUSTER [CLUSTER [...]] | --all]",
Short: "Get kubeconfig from cluster(s).",
Long: `Get kubeconfig from cluster(s).`,
ValidArgsFunction: util.ValidArgsAvailableClusters,
Args: func(cmd *cobra.Command, args []string) error {
if (len(args) < 1 && !getKubeconfigFlags.all) || (len(args) > 0 && getKubeconfigFlags.all) {
return fmt.Errorf("Need to specify one or more cluster names *or* set `--all` flag")
@ -63,13 +67,17 @@ func NewCmdGetKubeconfig() *cobra.Command {
// generate list of clusters
if getKubeconfigFlags.all {
clusters, err = cluster.GetClusters(runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
clusters = append(clusters, &k3d.Cluster{Name: clusterName})
retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
}
@ -77,17 +85,13 @@ func NewCmdGetKubeconfig() *cobra.Command {
errorGettingKubeconfig := false
for _, c := range clusters {
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
if getKubeconfigFlags.output, err = cluster.GetAndWriteKubeConfig(runtimes.SelectedRuntime, c, getKubeconfigFlags.output, &writeKubeConfigOptions); err != nil {
fmt.Println("---") // YAML document separator
if _, err := cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
log.Errorln(err)
errorGettingKubeconfig = true
}
}
// only print kubeconfig file path if output is not stdout ("-")
if getKubeconfigFlags.output != "-" {
fmt.Println(getKubeconfigFlags.output)
}
// return with non-zero exit code, if there was an error for one of the clusters
if errorGettingKubeconfig {
os.Exit(1)
@ -96,13 +100,6 @@ func NewCmdGetKubeconfig() *cobra.Command {
}
// add flags
cmd.Flags().StringVarP(&getKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
if err := cmd.MarkFlagFilename("output"); err != nil {
log.Fatalln("Failed to mark flag --output as filename")
}
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing KubeConfig")
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch", "s", false, "Switch to new context")
cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
cmd.Flags().BoolVarP(&getKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
// done
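With the output now fixed to stdout and a `---` separator between documents, usage reduces to (cluster name is a placeholder):

```bash
# Print one cluster's kubeconfig to stdout
k3d kubeconfig get mycluster

# Print the kubeconfigs of all clusters as a stream of YAML documents
k3d kubeconfig get --all
```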

@ -0,0 +1,140 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package kubeconfig
import (
"fmt"
"os"
"path"
"strings"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dutil "github.com/rancher/k3d/v3/pkg/util"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
log "github.com/sirupsen/logrus"
)
type mergeKubeconfigFlags struct {
all bool
output string
targetDefault bool
}
// NewCmdKubeconfigMerge returns a new cobra command
func NewCmdKubeconfigMerge() *cobra.Command {
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{}
mergeKubeconfigFlags := mergeKubeconfigFlags{}
// create new command
cmd := &cobra.Command{
Use: "merge [CLUSTER [CLUSTER [...]] | --all]",
Aliases: []string{"write"},
Long: `Merge/Write kubeconfig(s) from cluster(s) into existing kubeconfig/file.`,
Short: "Merge/Write kubeconfig(s) from cluster(s) into existing kubeconfig/file.",
ValidArgsFunction: util.ValidArgsAvailableClusters,
Args: cobra.MinimumNArgs(0),
Run: func(cmd *cobra.Command, args []string) {
var clusters []*k3d.Cluster
var err error
if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
log.Fatalln("Cannot use both '--output' and '--merge-default-kubeconfig' at the same time")
}
// generate list of clusters
if mergeKubeconfigFlags.all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
} else {
clusternames := []string{k3d.DefaultClusterName}
if len(args) != 0 {
clusternames = args
}
for _, clusterName := range clusternames {
retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
}
// get kubeconfigs from all clusters
errorGettingKubeconfig := false
var outputs []string
outputDir, err := k3dutil.GetConfigDirOrCreate()
if err != nil {
log.Errorln(err)
log.Fatalln("Failed to save kubeconfig to local directory")
}
for _, c := range clusters {
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
output := mergeKubeconfigFlags.output
if output == "" && !mergeKubeconfigFlags.targetDefault {
output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
}
output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
if err != nil {
log.Errorln(err)
errorGettingKubeconfig = true
} else {
outputs = append(outputs, output)
}
}
// only print kubeconfig file path if output is not stdout ("-")
if mergeKubeconfigFlags.output != "-" {
fmt.Println(strings.Join(outputs, ":"))
}
// return with non-zero exit code, if there was an error for one of the clusters
if errorGettingKubeconfig {
os.Exit(1)
}
},
}
// add flags
cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
if err := cmd.MarkFlagFilename("output"); err != nil {
log.Fatalln("Failed to mark flag --output as filename")
}
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "merge-default-kubeconfig", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch-context", "s", true, "Switch to new context")
cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
// done
return cmd
}
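A usage sketch of the new merge command (cluster name is a placeholder; note that `--update` and `--switch-context` default to true):

```bash
# Write the kubeconfig to a file in k3d's config dir and print its path
k3d kubeconfig merge mycluster

# Merge into the default kubeconfig ($KUBECONFIG or ~/.kube/config) instead
k3d kubeconfig merge mycluster --merge-default-kubeconfig
```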

@ -19,22 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package delete
package node
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdDelete returns a new cobra command
func NewCmdDelete() *cobra.Command {
// NewCmdNode returns a new cobra command
func NewCmdNode() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "delete",
Short: "Delete a resource.",
Long: `Delete a resource.`,
Use: "node",
Short: "Manage node(s)",
Long: `Manage node(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
@ -44,8 +43,13 @@ func NewCmdDelete() *cobra.Command {
}
// add subcommands
cmd.AddCommand(NewCmdDeleteCluster())
cmd.AddCommand(NewCmdDeleteNode())
cmd.AddCommand(NewCmdNodeCreate())
cmd.AddCommand(NewCmdNodeStart())
cmd.AddCommand(NewCmdNodeStop())
cmd.AddCommand(NewCmdNodeDelete())
cmd.AddCommand(NewCmdNodeList())
// add flags
// done
return cmd

@ -19,50 +19,58 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package create
package node
import (
"fmt"
"time"
"github.com/spf13/cobra"
k3dc "github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/version"
"github.com/rancher/k3d/v3/cmd/util"
k3dc "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
log "github.com/sirupsen/logrus"
)
// NewCmdCreateNode returns a new cobra command
func NewCmdCreateNode() *cobra.Command {
// NewCmdNodeCreate returns a new cobra command
func NewCmdNodeCreate() *cobra.Command {
createNodeOpts := k3d.NodeCreateOpts{}
// create new command
cmd := &cobra.Command{
Use: "node NAME",
Use: "create NAME",
Short: "Create a new k3s node in docker",
Long: `Create a new containerized k3s node (k3s in docker).`,
Args: cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
Run: func(cmd *cobra.Command, args []string) {
nodes, cluster := parseCreateNodeCmd(cmd, args)
for _, node := range nodes {
if err := k3dc.AddNodeToCluster(runtimes.SelectedRuntime, node, cluster); err != nil {
log.Errorf("Failed to add node '%s' to cluster '%s'", node.Name, cluster.Name)
log.Errorln(err)
}
if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
log.Errorln(err)
}
},
}
// add flags
cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
cmd.Flags().String("role", string(k3d.WorkerRole), "Specify node role [master, worker]")
cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
log.Fatalln("Failed to register flag completion for '--role'", err)
}
cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Select the cluster that the node shall connect to.")
if err := cmd.MarkFlagRequired("cluster"); err != nil {
log.Fatalln("Failed to mark required flag '--cluster'")
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.")
cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")
// done
return cmd
}
@ -78,7 +86,6 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
}
// --role
// TODO: createNode: for --role=master, update the nginx config and add TLS-SAN and server connection, etc.
roleStr, err := cmd.Flags().GetString("role")
if err != nil {
log.Errorln("No node role specified")
@ -112,6 +119,9 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i),
Role: role,
Image: image,
Labels: map[string]string{
k3d.LabelRole: roleStr,
},
}
nodes = append(nodes, node)
}
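A usage sketch of the renamed node-create command with the new flags registered above (names are placeholders):

```bash
# Add two agent nodes to an existing cluster and wait until they are ready
k3d node create extra --role agent --replicas 2 --cluster mycluster --wait --timeout 60s
```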

@ -19,27 +19,28 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package delete
package node
import (
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdDeleteNode returns a new cobra command
func NewCmdDeleteNode() *cobra.Command {
// NewCmdNodeDelete returns a new cobra command
func NewCmdNodeDelete() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "node (NAME | --all)",
Short: "Delete a node.",
Long: `Delete a node.`,
Args: cobra.MinimumNArgs(1), // at least one node has to be specified
Use: "delete (NAME | --all)",
Short: "Delete node(s).",
Long: `Delete node(s).`,
Args: cobra.MinimumNArgs(1), // at least one node has to be specified
ValidArgsFunction: util.ValidArgsAvailableNodes,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("delete node called")
nodes := parseDeleteNodeCmd(cmd, args)
@ -47,13 +48,11 @@ func NewCmdDeleteNode() *cobra.Command {
log.Infoln("No nodes found")
} else {
for _, node := range nodes {
if err := cluster.DeleteNode(runtimes.SelectedRuntime, node); err != nil {
if err := cluster.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
log.Fatalln(err)
}
}
}
log.Debugln("...Finished")
},
}
@ -75,7 +74,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
nodes, err = cluster.GetNodes(runtimes.SelectedRuntime)
nodes, err = cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
@ -87,7 +86,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
}
for _, name := range args {
node, err := cluster.GetNode(&k3d.Node{Name: name}, runtimes.SelectedRuntime)
node, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
log.Fatalln(err)
}
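Usage after the rename (the node name follows the `k3d-<cluster>-<role>-<index>` pattern seen elsewhere in this PR):

```bash
k3d node delete k3d-mycluster-agent-0
k3d node delete --all
```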

@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package get
package node
import (
"fmt"
@ -28,39 +28,43 @@ import (
"strings"
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
// NewCmdGetNode returns a new cobra command
func NewCmdGetNode() *cobra.Command {
// NewCmdNodeList returns a new cobra command
func NewCmdNodeList() *cobra.Command {
// create new command
cmd := &cobra.Command{
Use: "node NAME", // TODO: getNode: allow one or more names or --all flag
Short: "Get node",
Aliases: []string{"nodes"},
Long: `Get node.`,
Use: "list [NAME [NAME...]]",
Aliases: []string{"ls", "get"},
Short: "List node(s)",
Long: `List node(s).`,
Args: cobra.MinimumNArgs(0), // 0 or more; 0 = all
ValidArgsFunction: util.ValidArgsAvailableNodes,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("get node called")
node, headersOff := parseGetNodeCmd(cmd, args)
nodes, headersOff := parseGetNodeCmd(cmd, args)
var existingNodes []*k3d.Node
if node == nil { // Option a) no name specified -> get all nodes
found, err := cluster.GetNodes(runtimes.SelectedRuntime)
if len(nodes) == 0 { // Option a) no name specified -> get all nodes
found, err := cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) node name(s) specified -> get specific node(s)
found, err := cluster.GetNode(node, runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
for _, node := range nodes {
found, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
log.Fatalln(err)
}
existingNodes = append(existingNodes, found)
}
existingNodes = append(existingNodes, found)
}
// print existing nodes
printNodes(existingNodes, headersOff)
@ -76,7 +80,7 @@ func NewCmdGetNode() *cobra.Command {
return cmd
}
func parseGetNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, bool) {
func parseGetNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, bool) {
// --no-headers
headersOff, err := cmd.Flags().GetBool("no-headers")
if err != nil {
@ -88,9 +92,12 @@ func parseGetNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, bool) {
return nil, headersOff
}
node := &k3d.Node{Name: args[0]} // TODO: validate name first?
nodes := []*k3d.Node{}
for _, name := range args {
nodes = append(nodes, &k3d.Node{Name: name})
}
return node, headersOff
return nodes, headersOff
}
func printNodes(nodes []*k3d.Node, headersOff bool) {
@ -111,6 +118,6 @@ func printNodes(nodes []*k3d.Node, headersOff bool) {
})
for _, node := range nodes {
fmt.Fprintf(tabwriter, "%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), string(node.Role), node.Labels["k3d.cluster"])
fmt.Fprintf(tabwriter, "%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), string(node.Role), node.Labels[k3d.LabelClusterName])
}
}

@ -19,28 +19,29 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package start
package node
import (
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
// NewCmdStartNode returns a new cobra command
func NewCmdStartNode() *cobra.Command {
// NewCmdNodeStart returns a new cobra command
func NewCmdNodeStart() *cobra.Command {
// create new command
cmd := &cobra.Command{
Use: "node NAME", // TODO: startNode: allow one or more names or --all
Short: "Start an existing k3d node",
Long: `Start an existing k3d node.`,
Use: "start NAME", // TODO: startNode: allow one or more names or --all
Short: "Start an existing k3d node",
Long: `Start an existing k3d node.`,
ValidArgsFunction: util.ValidArgsAvailableNodes,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("start node called")
node := parseStartNodeCmd(cmd, args)
if err := runtimes.SelectedRuntime.StartNode(node); err != nil {
if err := runtimes.SelectedRuntime.StartNode(cmd.Context(), node); err != nil {
log.Fatalln(err)
}
},
@ -52,7 +53,7 @@ func NewCmdStartNode() *cobra.Command {
// parseStartNodeCmd parses the command input into variables required to start a node
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: startNode: allow node filters, e.g. `k3d start nodes mycluster@worker` to start all worker nodes of cluster 'mycluster'
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
log.Fatalln("No node name given")
}

@ -19,29 +19,30 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package stop
package node
import (
"github.com/rancher/k3d/pkg/runtimes"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/spf13/cobra"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdStopNode returns a new cobra command
func NewCmdStopNode() *cobra.Command {
// NewCmdNodeStop returns a new cobra command
func NewCmdNodeStop() *cobra.Command {
// create new command
cmd := &cobra.Command{
Use: "node NAME", // TODO: stopNode: allow one or more names or --all",
Short: "Stop an existing k3d node",
Long: `Stop an existing k3d node.`,
Use: "stop NAME", // TODO: stopNode: allow one or more names or --all",
Short: "Stop an existing k3d node",
Long: `Stop an existing k3d node.`,
ValidArgsFunction: util.ValidArgsAvailableNodes,
Run: func(cmd *cobra.Command, args []string) {
log.Debugln("stop node called")
node := parseStopNodeCmd(cmd, args)
if err := runtimes.SelectedRuntime.StopNode(node); err != nil {
if err := runtimes.SelectedRuntime.StopNode(cmd.Context(), node); err != nil {
log.Fatalln(err)
}
},
@ -53,7 +54,7 @@ func NewCmdStopNode() *cobra.Command {
// parseStopNodeCmd parses the command input into variables required to stop a node
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: allow node filters, e.g. `k3d stop nodes mycluster@worker` to stop all worker nodes of cluster 'mycluster'
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
log.Fatalln("No node name given")
}
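Start/stop still take exactly one node name (the `--all`/filter variants remain TODO, as the comments above note); a usage sketch with a placeholder node name:

```bash
k3d node stop k3d-mycluster-agent-0
k3d node start k3d-mycluster-agent-0
```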

@ -30,14 +30,12 @@ import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/cmd/create"
"github.com/rancher/k3d/cmd/delete"
"github.com/rancher/k3d/cmd/get"
"github.com/rancher/k3d/cmd/load"
"github.com/rancher/k3d/cmd/start"
"github.com/rancher/k3d/cmd/stop"
"github.com/rancher/k3d/pkg/runtimes"
"github.com/rancher/k3d/version"
"github.com/rancher/k3d/v3/cmd/cluster"
"github.com/rancher/k3d/v3/cmd/image"
"github.com/rancher/k3d/v3/cmd/kubeconfig"
"github.com/rancher/k3d/v3/cmd/node"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/version"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/writer"
@ -47,7 +45,6 @@ import (
type RootFlags struct {
debugLogging bool
version bool
runtime string
}
var flags = RootFlags{}
@ -65,6 +62,10 @@ All Nodes of a k3d cluster are part of the same docker network.`,
Run: func(cmd *cobra.Command, args []string) {
if flags.version {
printVersion()
} else {
if err := cmd.Usage(); err != nil {
log.Fatalln(err)
}
}
},
}
@ -73,8 +74,7 @@ All Nodes of a k3d cluster are part of the same docker network.`,
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
log.Errorln(err)
os.Exit(1)
log.Fatalln(err)
}
}
@ -84,24 +84,21 @@ func init() {
// add persistent flags (present to all subcommands)
// rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.k3d/config.yaml)")
rootCmd.PersistentFlags().BoolVar(&flags.debugLogging, "verbose", false, "Enable verbose output (debug logging)")
rootCmd.PersistentFlags().StringVarP(&flags.runtime, "runtime", "r", "docker", "Choose a container runtime environment [docker, containerd]")
// add local flags
rootCmd.Flags().BoolVar(&flags.version, "version", false, "Show k3d and default k3s version")
// add subcommands
rootCmd.AddCommand(NewCmdCompletion())
rootCmd.AddCommand(create.NewCmdCreate())
rootCmd.AddCommand(delete.NewCmdDelete())
rootCmd.AddCommand(get.NewCmdGet())
rootCmd.AddCommand(stop.NewCmdStop())
rootCmd.AddCommand(start.NewCmdStart())
rootCmd.AddCommand(load.NewCmdLoad())
rootCmd.AddCommand(cluster.NewCmdCluster())
rootCmd.AddCommand(kubeconfig.NewCmdKubeconfig())
rootCmd.AddCommand(node.NewCmdNode())
rootCmd.AddCommand(image.NewCmdImage())
rootCmd.AddCommand(&cobra.Command{
Use: "version",
Short: "Print k3d version",
Long: "Print k3d version",
Short: "Show k3d and default k3s version",
Long: "Show k3d and default k3s version",
Run: func(cmd *cobra.Command, args []string) {
printVersion()
},
@ -147,7 +144,7 @@ func initLogging() {
}
func initRuntime() {
runtime, err := runtimes.GetRuntime(flags.runtime)
runtime, err := runtimes.GetRuntime("docker")
if err != nil {
log.Fatalln(err)
}
@ -163,7 +160,7 @@ func printVersion() {
// Completion
var completionFunctions = map[string]func(io.Writer) error{
"bash": rootCmd.GenBashCompletion,
"zsh": rootCmd.GenZshCompletion, // FIXME: zsh completion requires https://github.com/spf13/cobra/pull/899 due to square brackets in our help texts
"zsh": rootCmd.GenZshCompletion,
"psh": rootCmd.GenPowerShellCompletion,
"powershell": rootCmd.GenPowerShellCompletion,
}
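For reviewers, the restructured command tree in a nutshell; verbs now hang off resource nouns (all names below are placeholders):

```bash
k3d cluster create demo --servers 3 --agents 2
k3d node list
k3d image import myapp:dev --cluster demo
k3d kubeconfig merge demo --switch-context
k3d completion bash   # shell argument assumed; bash/zsh/psh/powershell generators are wired up above
```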

@ -0,0 +1,97 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"context"
"strings"
k3dcluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// ValidArgsAvailableClusters is used for shell completion: proposes the list of existing clusters
func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var completions []string
var clusters []*k3d.Cluster
clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
log.Errorln("Failed to get list of clusters for shell completion")
return nil, cobra.ShellCompDirectiveError
}
clusterLoop:
for _, cluster := range clusters {
for _, arg := range args {
if arg == cluster.Name { // only clusters that are not in the args yet
continue clusterLoop
}
}
if strings.HasPrefix(cluster.Name, toComplete) {
completions = append(completions, cluster.Name)
}
}
return completions, cobra.ShellCompDirectiveDefault
}
// ValidArgsAvailableNodes is used for shell completion: proposes the list of existing nodes
func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var completions []string
var nodes []*k3d.Node
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
log.Errorln("Failed to get list of nodes for shell completion")
return nil, cobra.ShellCompDirectiveError
}
nodeLoop:
for _, node := range nodes {
for _, arg := range args {
if arg == node.Name { // only nodes that are not in the args yet
continue nodeLoop
}
}
if strings.HasPrefix(node.Name, toComplete) {
completions = append(completions, node.Name)
}
}
return completions, cobra.ShellCompDirectiveDefault
}
// ValidArgsNodeRoles is used for shell completion: proposes the list of possible node roles
func ValidArgsNodeRoles(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var completions []string
roles := []string{string(k3d.ServerRole), string(k3d.AgentRole)}
for _, role := range roles {
if strings.HasPrefix(role, toComplete) {
completions = append(completions, role)
}
}
return completions, cobra.ShellCompDirectiveDefault
}
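These helpers feed cobra's dynamic completion; a hedged sketch of how they surface in a shell (the `k3d completion bash` invocation is assumed from root.go):

```bash
# Load completions into the current shell
source <(k3d completion bash)
# 'k3d cluster delete <TAB>' now proposes existing clusters not already typed
```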

@ -28,13 +28,13 @@ import (
log "github.com/sirupsen/logrus"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
"regexp"
)
// Regexp pattern to match node filters
var filterRegexp = regexp.MustCompile(`^(?P<group>master|worker|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
func SplitFiltersFromFlag(flag string) (string, []string, error) {
@ -67,21 +67,21 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
if len(filters) == 0 || len(filters[0]) == 0 {
log.Warnln("No filter specified")
log.Warnln("No node filter specified")
return nodes, nil
}
// map roles to subsets
masterNodes := []*k3d.Node{}
workerNodes := []*k3d.Node{}
var masterlb *k3d.Node
serverNodes := []*k3d.Node{}
agentNodes := []*k3d.Node{}
var serverlb *k3d.Node
for _, node := range nodes {
if node.Role == k3d.MasterRole {
masterNodes = append(masterNodes, node)
} else if node.Role == k3d.WorkerRole {
workerNodes = append(workerNodes, node)
if node.Role == k3d.ServerRole {
serverNodes = append(serverNodes, node)
} else if node.Role == k3d.AgentRole {
agentNodes = append(agentNodes, node)
} else if node.Role == k3d.LoadBalancerRole {
masterlb = node
serverlb = node
}
}
@ -110,12 +110,12 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
// Choose the group of nodes to operate on
groupNodes := []*k3d.Node{}
if submatches["group"] == string(k3d.MasterRole) {
groupNodes = masterNodes
} else if submatches["group"] == string(k3d.WorkerRole) {
groupNodes = workerNodes
if submatches["group"] == string(k3d.ServerRole) {
groupNodes = serverNodes
} else if submatches["group"] == string(k3d.AgentRole) {
groupNodes = agentNodes
} else if submatches["group"] == string(k3d.LoadBalancerRole) {
filteredNodes = append(filteredNodes, masterlb)
filteredNodes = append(filteredNodes, serverlb)
return filteredNodes, nil // early exit if filtered group is the loadbalancer
}
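The renamed groups show up in the node-filter suffix that `SplitFiltersFromFlag` strips off flag values; a usage sketch (the `@FILTER` suffix is k3d's filter syntax; paths and ports are placeholders):

```bash
# Mount a volume only into the first and third agent, map a port on the loadbalancer
k3d cluster create demo --agents 3 \
  -v /tmp/data:/data@agent[0,2] \
  -p 8080:80@loadbalancer
```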

@ -27,7 +27,7 @@ import (
"strconv"
"strings"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -54,14 +54,27 @@ func ParseAPIPort(portString string) (k3d.ExposeAPI, error) {
}
// Verify 'port' is an integer and within port ranges
if exposeAPI.Port == "" || exposeAPI.Port == "random" {
log.Debugf("API-Port Mapping didn't specify hostPort, choosing one randomly...")
freePort, err := GetFreePort()
if err != nil || freePort == 0 {
log.Warnf("Failed to get random free port:\n%+v", err)
log.Warnf("Falling back to default port %s (may be blocked though)...", k3d.DefaultAPIPort)
exposeAPI.Port = k3d.DefaultAPIPort
} else {
exposeAPI.Port = strconv.Itoa(freePort)
log.Debugf("Got free port for API: '%d'", freePort)
}
}
p, err := strconv.Atoi(exposeAPI.Port)
if err != nil {
log.Errorln("Failed to parse port mapping")
return exposeAPI, err
}
if p < 0 || p > 65535 {
log.Errorln("Failed to parse API Port specification")
return exposeAPI, fmt.Errorf("port value '%d' out of range", p)
return exposeAPI, fmt.Errorf("Port value '%d' out of range", p)
}
return exposeAPI, nil
@ -72,3 +85,21 @@ func ParseAPIPort(portString string) (k3d.ExposeAPI, error) {
func ValidatePortMap(portmap string) (string, error) {
return portmap, nil // TODO: ValidatePortMap: add validation of port mapping
}
// GetFreePort tries to fetch an open port from the OS-Kernel
func GetFreePort() (int, error) {
tcpAddress, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
log.Errorln("Failed to resolve address")
return 0, err
}
tcpListener, err := net.ListenTCP("tcp", tcpAddress)
if err != nil {
log.Errorln("Failed to create TCP Listener")
return 0, err
}
defer tcpListener.Close()
return tcpListener.Addr().(*net.TCPAddr).Port, nil
}
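With the new fallback, an unspecified or `random` host port resolves via `GetFreePort`; a usage sketch (the `--api-port` flag lives in clusterCreate, which is not part of this hunk, so treat it as assumed):

```bash
k3d cluster create demo --api-port 6550     # fixed host port
k3d cluster create demo2 --api-port random  # let k3d pick a free port
```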

@ -25,19 +25,26 @@ import (
"fmt"
"os"
"strings"
"github.com/rancher/k3d/v3/pkg/runtimes"
log "github.com/sirupsen/logrus"
)
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
// - SRC: source directory/file -> tests: must exist
// - DEST: source directory/file -> tests: must be absolute path
func ValidateVolumeMount(volumeMount string) (string, error) {
func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string, error) {
src := ""
dest := ""
// validate 'SRC[:DEST]' substring
split := strings.Split(volumeMount, ":")
if len(split) < 1 || len(split) > 2 {
return "", fmt.Errorf("Invalid volume mount '%s': only one ':' allowed", volumeMount)
if len(split) < 1 {
return "", fmt.Errorf("No volume/path specified")
}
if len(split) > 3 {
return "", fmt.Errorf("Invalid volume mount '%s': maximal 2 ':' allowed", volumeMount)
}
// we only have SRC specified -> DEST = SRC
@ -51,8 +58,15 @@ func ValidateVolumeMount(volumeMount string) (string, error) {
// verify that the source exists
if src != "" {
if _, err := os.Stat(src); err != nil {
return "", fmt.Errorf("Failed to stat file/dir that you're trying to mount: '%s' in '%s'", src, volumeMount)
// a) named volume
isNamedVolume := true
if err := verifyNamedVolume(runtime, src); err != nil {
isNamedVolume = false
}
if !isNamedVolume {
if _, err := os.Stat(src); err != nil {
log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
}
}
}
@ -61,5 +75,17 @@ func ValidateVolumeMount(volumeMount string) (string, error) {
return "", fmt.Errorf("Volume mount destination doesn't appear to be an absolute path: '%s' in '%s'", dest, volumeMount)
}
return fmt.Sprintf("%s:%s", src, dest), nil
return volumeMount, nil
}
// verifyNamedVolume checks whether a named volume exists in the runtime
func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
volumeName, err := runtime.GetVolume(volumeName)
if err != nil {
return err
}
if volumeName == "" {
return fmt.Errorf("Failed to find named volume '%s'", volumeName)
}
return nil
}
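Both mount styles the relaxed validation now accepts, as a sketch (volume and path names are placeholders):

```bash
# Bind-mount a host path (SRC:DEST)
k3d cluster create demo -v /tmp/k3d:/data

# Mount an existing named volume instead of a host path
docker volume create k3d-data
k3d cluster create demo2 -v k3d-data:/data
```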

@ -3,11 +3,11 @@
## Issues with BTRFS
- As [@jaredallard](https://github.com/jaredallard) [pointed out](https://github.com/rancher/k3d/pull/48), people running `k3d` on a system with **btrfs** may need to mount `/dev/mapper` into the nodes for the setup to work.
- This will do: `k3d create cluster CLUSTER_NAME -v /dev/mapper:/dev/mapper`
- This will do the trick: `k3d cluster create CLUSTER_NAME -v /dev/mapper:/dev/mapper`
## Issues with ZFS
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-master setups (e.g. `k3d create cluster multimaster --masters 3`) fails, because the initializing master node (server flag `--cluster-init`) errors out with the following log:
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
```bash
starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
```
@ -23,4 +23,12 @@
- Possible [fix/workaround by @zer0def](https://github.com/rancher/k3d/issues/133#issuecomment-549065666):
- use a docker storage driver which cleans up properly (e.g. overlay2)
- clean up or expand docker root filesystem
- change the kubelet's eviction thresholds upon cluster creation: `k3d create cluster --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`
- change the kubelet's eviction thresholds upon cluster creation: `k3d cluster create --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`
## Restarting a multi-server cluster or the initializing server node fails
- What you do: You create a cluster with more than one server node and later, you either stop `server-0` or stop/start the whole cluster
- What fails: After the restart, you cannot connect to the cluster anymore and `kubectl` will give you a lot of errors
- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing server node to go down
- What's the solution: Hopefully, this will be solved by the planned [replacement of dqlite with embedded etcd in k3s](https://github.com/rancher/k3s/pull/1770)
- Related issues: [#262](https://github.com/rancher/k3d/issues/262)
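A minimal reproduction of the scenario described above (cluster name is a placeholder):

```bash
k3d cluster create multiserver --servers 3
k3d cluster stop multiserver
k3d cluster start multiserver   # kubectl can no longer reach the cluster afterwards
```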

@ -10,7 +10,7 @@
- --command -> planned: keep
- --shell -> planned: keep (or second arg)
- auto, bash, zsh
- create -> `k3d create cluster CLUSTERNAME`
- create -> `k3d cluster create CLUSTERNAME`
- --name -> dropped, implemented via arg
- --volume -> implemented
- --port -> implemented
@ -30,7 +30,7 @@
- --registry-volume -> TBD
- --registries-file -> TBD
- --enable-registry-cache -> TBD
- (add-node) -> `k3d create node NODENAME`
- (add-node) -> `k3d node create NODENAME`
- --role -> implemented
- --name -> dropped, implemented as arg
- --count -> implemented as `--replicas`
@ -41,23 +41,23 @@
- --k3s -> TBD
- --k3s-secret -> TBD
- --k3s-token -> TBD
- delete -> `k3d delete cluster CLUSTERNAME`
- delete -> `k3d cluster delete CLUSTERNAME`
- --name -> dropped, implemented as arg
- --all -> implemented
- --prune -> TBD
- --keep-registry-volume -> TBD
- stop -> `k3d stop cluster CLUSTERNAME`
- stop -> `k3d cluster stop CLUSTERNAME`
- --name -> dropped, implemented as arg
- --all -> implemented
- start -> `k3d start cluster CLUSTERNAME`
- start -> `k3d cluster start CLUSTERNAME`
- --name -> dropped, implemented as arg
- --all -> implemented
- list -> dropped, implemented as `k3d get clusters`
- get-kubeconfig -> `k3d get kubeconfig CLUSTERNAME`
- get-kubeconfig -> `k3d kubeconfig get|merge CLUSTERNAME`
- --name -> dropped, implemented as arg
- --all -> implemented
- --overwrite -> implemented
- import-images -> `k3d load image [--cluster CLUSTERNAME] [--keep] IMAGES`
- import-images -> `k3d image import [--cluster CLUSTERNAME] [--keep] IMAGES`
- --name -> implemented as `--cluster`
- --no-remove -> implemented as `--keep`
- --no-remove -> implemented as `--keep-tarball`
```

@ -2,12 +2,14 @@
![k3d](static/img/k3d_logo_black_blue.svg)
**This page is targeting k3d v3.0.0 and newer!**
k3d is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s) (Rancher Lab's minimal Kubernetes distribution) in docker.
k3d makes it very easy to create single- and multi-node [k3s](https://github.com/rancher/k3s) clusters in docker, e.g. for local development on Kubernetes.
??? Tip "View a quick demo"
<asciinema-player src="/static/asciicast/20200515_k3d.01.cast" cols=200 rows=32></asciinema-player>
<asciinema-player src="/static/asciicast/20200715_k3d.01.cast" cols=200 rows=32></asciinema-player>
## Learning
@ -31,30 +33,31 @@ k3d makes it very easy to create single- and multi-node [k3s](https://github.com
You have several options there:
- use the install script to grab the latest release:
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/master/install.sh | bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/master/install.sh | bash`
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- use the install script to grab a specific release (via `TAG` environment variable):
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/master/install.sh | TAG=v3.0.0-beta.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/master/install.sh | TAG=v3.0.0-beta.0 bash`
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- use [Homebrew](https://brew.sh): `#!bash brew install k3d` (Homebrew is available for MacOS and Linux)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
- install via [AUR](https://aur.archlinux.org/) package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/): `yay -S rancher-k3d-bin`
- grab a release from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself.
- install via go: `#!bash go install github.com/rancher/k3d` (**Note**: this will give you unreleased/bleeding-edge changes)
- use [arkade](https://github.com/alexellis/arkade): `arkade get k3d`
## Quick Start
Create a cluster named `mycluster` with just a single master node:
Create a cluster named `mycluster` with just a single server node:
```bash
k3d create cluster mycluster
k3d cluster create mycluster
```
Get the new cluster's connection details merged into your default kubeconfig (usually specified using the `KUBECONFIG` environment variable or the default path `#!bash $HOME/.kube/config`) and directly switch to the new context:
```bash
k3d get kubeconfig mycluster --switch
k3d kubeconfig merge mycluster --switch-context
```
Use the new cluster with [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), e.g.:
@ -62,3 +65,7 @@ Use the new cluster with [`kubectl`](https://kubernetes.io/docs/tasks/tools/inst
```bash
kubectl get nodes
```
## Related Projects
- [k3x](https://github.com/inercia/k3x): a graphical interface (for Linux) for k3d.

@ -1,12 +1,12 @@
# Defaults
* multiple master nodes
* by default, when `--master` > 1 and no `--datastore-x` option is set, the first master node (master-0) will be the initializing master node
* the initializing master node will have the `--cluster-init` flag appended
* all other master nodes will refer to the initializing master node via `--server https://<init-node>:6443`
* API-Ports
* by default, we don't expose any API-Port (no host port mapping)
* kubeconfig
* if no output is set explicitly (via the `--output` flag), we use the default loading rules to get the default kubeconfig:
* First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
* Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
- multiple server nodes
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
- API-Ports
- by default, we don't expose any API-Port (no host port mapping)
- kubeconfig
- if `--[update|merge]-default-kubeconfig` is set, we use the default loading rules to get the default kubeconfig:
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
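For example (a sketch; `server-0` receiving `--cluster-init` follows from the rules above):

```bash
# server-0 becomes the initializing node; server-1/2 join it via --server https://<init-node>:6443
k3d cluster create multiserver --servers 3
```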

@ -13,9 +13,9 @@ Existing networks won't be managed by k3d together with the cluster lifecycle.
### `host` network
When using the `--network` flag to connect to the host network (i.e. `k3d create cluster --network host`),
you won't be able to create more than **one master node**.
An edge case would be one master node (with agent disabled) and one worker node.
When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`),
you won't be able to create more than **one server node**.
An edge case would be one server node (with agent disabled) and one agent node.
### `bridge` network

@ -0,0 +1,215 @@
{"version":2,"width":213,"height":47,"timestamp":1594792376,"theme":{},"env":{"SHELL":"/bin/zsh","TERM":"xterm-256color"}}
[2.221,"o","\u001b[H\u001b[2J\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K"]
[2.881,"o","k"]
[3.002,"o","\u0008k3"]
[3.187,"o","d"]
[3.32,"o"," "]
[3.42,"o","v"]
[3.53,"o","e"]
[3.662,"o","r"]
[3.827,"o","s"]
[3.913,"o","i"]
[4.113,"o","o"]
[4.245,"o","n"]
[4.443,"o","\u001b[?2004l\r\r\n"]
[4.471,"o","k3d version v3.0.0\r\nk3s version v1.18.6-k3s1 (default)\r\n"]
[4.473,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[5.719,"o","k"]
[5.829,"o","\u0008k3"]
[6.038,"o","d"]
[6.235,"o"," "]
[6.75,"o","c"]
[6.852,"o","l"]
[7.094,"o","u"]
[7.193,"o","s"]
[7.534,"o","t"]
[7.71,"o","e"]
[7.831,"o","r"]
[8.029,"o"," "]
[8.268,"o","l"]
[8.491,"o","i"]
[8.634,"o","s"]
[8.796,"o","t"]
[8.943,"o","\u001b[?2004l\r\r\n"]
[8.971,"o","NAME SERVERS AGENTS\r\n"]
[8.972,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[9.756,"o","k"]
[9.854,"o","\u0008k3"]
[10.05,"o","d"]
[10.448,"o"," "]
[10.811,"o","c"]
[10.91,"o","l"]
[11.119,"o","u"]
[11.174,"o","s"]
[11.284,"o","t"]
[11.416,"o","e"]
[11.515,"o","r"]
[11.648,"o"," "]
[11.746,"o","c"]
[11.988,"o","r"]
[12.054,"o","e"]
[12.087,"o","a"]
[12.208,"o","t"]
[12.285,"o","e"]
[13.055,"o"," "]
[13.77,"o","d"]
[13.858,"o","e"]
[13.99,"o","m"]
[14.133,"o","o"]
[14.321,"o"," "]
[14.629,"o","-"]
[14.801,"o","-"]
[14.867,"o","s"]
[15.065,"o","e"]
[15.111,"o","r"]
[15.343,"o","v"]
[15.409,"o","e"]
[15.519,"o","r"]
[15.706,"o","s"]
[15.905,"o"," "]
[16.74,"o","3"]
[16.914,"o"," "]
[17.137,"o","-"]
[17.291,"o","-"]
[17.44,"o","a"]
[17.586,"o","g"]
[17.687,"o","e"]
[17.763,"o","n"]
[17.84,"o","t"]
[18.017,"o","s"]
[18.259,"o"," "]
[18.379,"o","3"]
[19.137,"o","\u001b[?2004l\r\r\n"]
[19.222,"o","\u001b[36mINFO\u001b[0m[0000] Created network 'k3d-demo' \r\n"]
[19.225,"o","\u001b[36mINFO\u001b[0m[0000] Created volume 'k3d-demo-images' \r\n"]
[19.225,"o","\u001b[36mINFO\u001b[0m[0000] Creating initializing server node \r\n\u001b[36mINFO\u001b[0m[0000] Creating node 'k3d-demo-server-0' \r\n"]
[30.266,"o","\u001b[36mINFO\u001b[0m[0011] Creating node 'k3d-demo-server-1' \r\n"]
[31.634,"o","\u001b[36mINFO\u001b[0m[0012] Creating node 'k3d-demo-server-2' \r\n"]
[32.125,"o","\u001b[36mINFO\u001b[0m[0012] Creating node 'k3d-demo-agent-0' \r\n"]
[32.519,"o","\u001b[36mINFO\u001b[0m[0013] Creating node 'k3d-demo-agent-1' \r\n"]
[33.096,"o","\u001b[36mINFO\u001b[0m[0013] Creating node 'k3d-demo-agent-2' \r\n"]
[33.577,"o","\u001b[36mINFO\u001b[0m[0014] Creating LoadBalancer 'k3d-demo-serverlb' \r\n"]
[38.248,"o","\u001b[36mINFO\u001b[0m[0019] Cluster 'demo' created successfully! \r\n"]
[38.33,"o","\u001b[36mINFO\u001b[0m[0019] You can now use it like this: \r\nkubectl cluster-info\r\n"]
[38.335,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
[38.336,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[40.212,"o","k"]
[40.465,"o","\u0008ku"]
[40.63,"o","b"]
[40.773,"o","e"]
[40.916,"o","c"]
[41.169,"o","t"]
[41.302,"o","l"]
[41.53,"o"," "]
[41.686,"o","c"]
[41.774,"o","o"]
[41.928,"o","n"]
[42.093,"o","f"]
[42.213,"o","i"]
[42.401,"o","g"]
[42.565,"o"," "]
[42.686,"o","v"]
[42.807,"o","i"]
[42.895,"o","e"]
[43.082,"o","w"]
[43.27,"o","\u001b[?2004l\r\r\n"]
[43.304,"o","apiVersion: v1\r\nclusters:\r\n- cluster:\r\n certificate-authority-data: DATA+OMITTED\r\n server: https://0.0.0.0:38365\r\n name: k3d-demo\r\ncontexts:\r\n- context:\r\n cluster: k3d-demo\r\n user: admin@k3d-demo\r\n name: k3d-demo\r\ncurrent-context: k3d-demo\r\nkind: Config\r\npreferences: {}\r\nusers:\r\n- name: admin@k3d-demo\r\n user:\r\n password: 1c22f8175521452403719784fa0b124f\r\n username: admin\r\n"]
[43.305,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
[43.305,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[45.754,"o","k"]
[45.843,"o","\u0008k3"]
[45.987,"o","d"]
[46.075,"o"," "]
[46.24,"o","c"]
[46.36,"o","l"]
[46.569,"o","u"]
[46.658,"o","s"]
[46.768,"o","t"]
[46.91,"o","e"]
[46.977,"o","r"]
[47.042,"o"," "]
[47.252,"o","l"]
[47.472,"o","i"]
[47.56,"o","s"]
[47.681,"o","t"]
[47.846,"o","\u001b[?2004l\r\r\n"]
[47.88,"o","NAME SERVERS AGENTS\r\ndemo 3 3\r\n"]
[47.881,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
[47.882,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[48.517,"o","k"]
[48.616,"o","\u0008k3"]
[48.77,"o","d"]
[48.968,"o"," "]
[50.133,"o","n"]
[50.276,"o","o"]
[50.33,"o","d"]
[50.386,"o","e"]
[50.462,"o"," "]
[50.605,"o","l"]
[50.792,"o","i"]
[50.881,"o","s"]
[51.046,"o","t"]
[51.255,"o","\u001b[?2004l\r\r\n"]
[51.266,"o","NAME ROLE CLUSTER\r\nk3d-demo-agent-0 agent "]
[51.266,"o","demo\r\nk3d-demo-agent-1 agent demo\r\nk3d-demo-agent-2 agent demo\r\nk3d-demo-server-0 server demo\r\nk3d-demo-server-1 server demo\r\nk3d-demo-server-2 server demo\r\nk3d-demo-serverlb loadbalancer demo\r\n"]
[51.267,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
[51.267,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[58.525,"o","k"]
[58.822,"o","\u0008ku"]
[58.999,"o","b"]
[59.109,"o","e"]
[59.217,"o","c"]
[59.48,"o","t"]
[59.582,"o","l"]
[59.791,"o"," "]
[59.876,"o","g"]
[59.978,"o","e"]
[60.109,"o","t"]
[60.285,"o"," "]
[60.495,"o","n"]
[60.626,"o","o"]
[60.725,"o","d"]
[60.791,"o","e"]
[61,"o","s"]
[61.176,"o","\u001b[?2004l\r\r\n"]
[61.334,"o","NAME STATUS ROLES AGE VERSION\r\nk3d-demo-server-2 Ready master 22s v1.18.4+k3s1\r\nk3d-demo-agent-2 Ready \u003cnone\u003e 26s v1.18.4+k3s1\r\nk3d-demo-agent-0 Ready \u003cnone\u003e "]
[61.334,"o","27s v1.18.4+k3s1\r\nk3d-demo-agent-1 Ready \u003cnone\u003e 27s v1.18.4+k3s1\r\nk3d-demo-server-1 Ready master 19s v1.18.4+k3s1\r\nk3d-demo-server-0 Ready master 32s v1.18.4+k3s1\r\n"]
[61.336,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[63.901,"o","k"]
[64.08,"o","\u0008k3"]
[64.297,"o","d"]
[64.464,"o"," "]
[64.674,"o","c"]
[64.84,"o","l"]
[65.126,"o","u"]
[65.245,"o","s"]
[65.411,"o","t"]
[65.553,"o","e"]
[65.675,"o","r"]
[65.918,"o"," "]
[66.18,"o","d"]
[66.302,"o","e"]
[66.499,"o","l"]
[66.687,"o","e"]
[66.874,"o","t"]
[66.984,"o","e"]
[67.794,"o"," "]
[67.919,"o","d"]
[67.985,"o","e"]
[68.084,"o","m"]
[68.257,"o","o"]
[68.623,"o","\u001b[?2004l\r\r\n"]
[68.637,"o","\u001b[36mINFO\u001b[0m[0000] Deleting cluster 'demo' \r\n"]
[69.822,"o","\u001b[36mINFO\u001b[0m[0001] Deleted k3d-demo-serverlb \r\n"]
[70.309,"o","\u001b[36mINFO\u001b[0m[0001] Deleted k3d-demo-agent-2 \r\n"]
[71.017,"o","\u001b[36mINFO\u001b[0m[0002] Deleted k3d-demo-agent-1 \r\n"]
[71.645,"o","\u001b[36mINFO\u001b[0m[0003] Deleted k3d-demo-agent-0 \r\n"]
[72.274,"o","\u001b[36mINFO\u001b[0m[0003] Deleted k3d-demo-server-2 \r\n"]
[73.264,"o","\u001b[36mINFO\u001b[0m[0004] Deleted k3d-demo-server-1 \r\n"]
[74.155,"o","\u001b[36mINFO\u001b[0m[0005] Deleted k3d-demo-server-0 \r\n"]
[74.155,"o","\u001b[36mINFO\u001b[0m[0005] Deleting cluster network '2324ab59566f5bca41df87cbf7b65e14539cfe9feea1087f143eec45ac91652f' \r\n"]
[74.35,"o","\u001b[36mINFO\u001b[0m[0005] Deleting image volume 'k3d-demo-images' \r\n"]
[74.36,"o","\u001b[36mINFO\u001b[0m[0005] Removing cluster details from default kubeconfig... \r\n"]
[74.362,"o","\u001b[36mINFO\u001b[0m[0005] Removing standalone kubeconfig file (if there is one)... \r\n\u001b[36mINFO\u001b[0m[0005] Successfully deleted cluster demo! \r\n"]
[74.363,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
[75.795,"o","\u001b[?2004l\r\r\n"]

@ -0,0 +1,33 @@
.md-header-nav__button.md-logo img, .md-header-nav__button.md-logo svg {
width: 3rem;
height: 3rem;
}
.md-header-nav__button.md-logo {
margin: 0;
padding: 0;
}
.md-header {
height: 3rem;
}
.md-header-nav {
align-items: center;
}
.md-ellipsis {
display: inline;
}
.md-header-nav__topic {
position: relative;
}
[data-md-color-primary=black] .md-tabs {
background-color: #0DCEFF;
}
.md-tabs {
color: black;
}

Binary file not shown. (Before: 18 KiB → After: 8.2 KiB)

Binary file not shown. (Before: 57 KiB → After: 21 KiB)

Binary file not shown. (Before: 16 KiB → After: 8.8 KiB)

Binary file not shown. (Before: 711 B → After: 1.3 KiB)

Binary file not shown. (Before: 1.6 KiB → After: 1.7 KiB)

Binary file not shown. (Before: 15 KiB)

Binary file not shown. (After: 4.7 KiB)

Binary file not shown. (Before: 17 KiB → After: 7.2 KiB)

Binary file not shown. (Before: 53 KiB → After: 5.5 KiB)

Binary file not shown. (Before: 15 KiB → After: 7.6 KiB)

Binary file not shown. (Before: 657 B → After: 920 B)

Binary file not shown. (Before: 1.4 KiB → After: 1.2 KiB)

Binary file not shown. (Before: 15 KiB → After: 1.1 KiB)

@ -0,0 +1,91 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 24.1.2, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 165.5865631 62.7499199" style="enable-background:new 0 0 165.5865631 62.7499199;" xml:space="preserve">
<g>
<path d="M1.5732723,59.1749115c0.9831573,0.1782036,1.2510706,0.4461174,1.4298817,0.8037415
c0.1788108,0.4473305,0.1788108,0.8049507,0.0897086,1.2522812v0.0884972
c-0.1788111,0.7152443-0.7152441,1.0728645-1.5195903,1.0728645c-0.6261414-0.1782036-1.0728657-0.4473305-1.3407791-0.8934479
c-0.2685195-0.3576202-0.2685195-0.8049507-0.178811-1.2522812C0.3215957,59.5325317,0.8580286,59.1749115,1.5732723,59.1749115z"
/>
<path d="M0.1427847,40.3142967c0-0.3576241,0.0897086-0.6255379,0.2685195-0.8946609L30.9813118,0.537039
C31.2492237,0.1794171,31.6965542,0,32.14328,0c0.3576202,0,0.6255341,0.0897086,0.8940544,0.2691256
c0.8043442,0.6255354,0.8934479,1.2510705,0.2679138,2.0560229L3.0928626,40.8501205v14.92799
c0,0.8037376-0.7152441,1.5189819-1.5195903,1.5189819s-1.4304876-0.7152443-1.4304876-1.5189819
C0.1427847,55.7781105,0.1427847,40.3142967,0.1427847,40.3142967z"/>
<path d="M0.1427847,27.3526211V1.6984011c0-0.8037401,0.6261414-1.518984,1.4304876-1.518984
s1.5195903,0.7152438,1.5195903,1.518984v25.6542206c0,0.8049526-0.7152441,1.5201969-1.5195903,1.5201969
S0.1427847,28.1575737,0.1427847,27.3526211z"/>
<path d="M10.9587259,18.2350807c0,0.8049526-0.7152433,1.5201969-1.5195894,1.5201969s-1.4304876-0.7152443-1.4304876-1.5201969
V9.6545792c0-0.9831572,0.4473305-1.4304876,1.4304876-1.4304876c0.8940544,0,1.5195894,0.5358267,1.5195894,1.4304876V18.2350807z
"/>
<path d="M8.0086489,60.9618073V44.4251289c0-0.4461174,0.0897083-0.7140312,0.2685194-0.8934479l5.2740135-6.7038956
c0.2679138-0.3576241,0.7146378-0.5358276,1.1619682-0.5358276c0.5358267,0,0.8934488,0.1782036,1.1619682,0.5358276
l17.8768539,23.4187813c0.5364342,0.8049507,0.4473305,1.4304848-0.2679138,2.0560226
c-0.2679138,0.1794167-0.5364342,0.2691231-0.8940544,0.2691231c-0.4467239,0-0.8934498-0.1794167-1.1619682-0.5370369
L14.71315,40.1348763l-3.7544241,4.7375832v16.0893478c0,0.8049545-0.7152433,1.4304886-1.5195894,1.4304886
C8.4559793,62.3922958,8.0086489,61.9449654,8.0086489,60.9618073z"/>
<path d="M44.4788094,2.3251486L22.0425816,30.749424l22.5253315,29.3189354
c0.2685204,0.2679138,0.3576241,0.6255379,0.2685204,0.9831581c-0.0897102,0.4473305-0.2685204,0.8049507-0.6261406,1.0728645
c-0.1788139,0.1782074-0.4467239,0.2679138-0.8934517,0.2679138c-0.4473305,0-0.8049507-0.1782036-1.1619682-0.5358276
L19.0034008,31.6428719c-0.4467239-0.6255341-0.4467239-1.1613617,0-1.7868958L42.2439766,0.537039
C42.5118904,0.1794171,42.8695107,0,43.3168411,0c0.4467278,0,0.7146378,0.0897086,0.9831581,0.2691256
C45.1043472,0.8946609,45.1043472,1.6099048,44.4788094,2.3251486z"/>
<path d="M76.4738541,2.1457314c0.0897064,0.3576219,0.0897064,0.7152438-0.0891037,1.072866
c-0.2679138,0.3576219-0.536438,0.6255352-0.9831619,0.7152438c-7.86586,1.9663143-13.1398773,8.0446739-13.1398773,16.2675533
c0,0.8049526-0.7152405,1.5201969-1.5195885,1.5201969c-0.9831581,0-1.4304886-0.5358276-1.4304886-1.5201969
c0-9.9212799,6.3468819-16.6251755,15.3747139-19.1285286C75.6695023,1.0728657,76.2059402,1.0728657,76.4738541,2.1457314z"/>
<path d="M60.9209328,41.297451c0.804348,0,1.5195923,0.7152443,1.5195923,1.5189857
c0,9.6545792,8.670208,16.6263885,17.8774605,16.9840088c0.4467239,0.0884972,0.8043442,0.2679138,1.072258,0.5358276
c0.2685165,0.2679138,0.3576202,0.6255341,0.2685165,1.0728645c0,0.8934517-0.4473267,1.3407822-1.4304886,1.3407822
c-11.0838547-0.7152443-20.7378273-8.4920082-20.7378273-19.9334831
C59.4904442,42.0126953,60.1165886,41.297451,60.9209328,41.297451z"/>
<path d="M85.1446686,7.151226c5.2740097,0,10.548027,1.3407793,13.676918,5.900156
c1.0716476,1.5189838,1.6971893,2.8597631,1.9663086,4.022337c0.1782074,1.25107,0.2679138,1.876606,0.2679138,1.9663143
c0,5.0054951-1.8778152,9.9212818-6.5256882,12.2464294c4.6478729,2.9494724,6.4359818,6.6141872,6.4359818,12.155508
c0,4.3799591-3.3070908,8.5817146-7.0615158,10.548027c-2.4136429,1.2510719-5.2740173,1.876606-8.7599182,1.876606h-0.2685242
c-8.8490219,0-17.2513123-3.1276741-17.2513123-13.0501671c0-0.8037415,0.7152405-1.5189857,1.5195847-1.5189857
c0.8940582,0,1.4298859,0.6255379,1.4298859,1.5189857c0,8.402298,7.7767563,10.1006966,14.4806519,10.1006966h0.0897141
c5.5419235,0,10.9953537-2.1445198,12.6040497-7.8652573c0.1782074-0.9831581,0.2679138-1.520195,0.2679138-1.6099052
c0-1.2510681-0.1794205-2.50214-0.4473343-3.753212c-1.1613617-3.6659279-3.664711-6.6153984-7.7767563-6.8833122
c-0.6255341-0.1782036-1.3407822-0.8049507-1.3407822-1.4304867c0-0.8934498,0.5364304-1.3407803,1.3407822-1.5189838
c0.4473267-0.0897102,0.8940506-0.1794186,1.3407745-0.3576221c0.4473343-0.0897083,0.8940582-0.2679138,1.3407822-0.5370388
c4.1120453-1.6086941,5.6322403-5.631031,5.6322403-9.8315735c0-1.6099052-0.8049545-3.2185965-1.7881088-4.4696674
c-2.1457291-3.0391798-5.8995514-4.5593767-11.1735611-4.5593767c-6.4359818,0-14.7485733,2.4136448-14.7485733,10.1006975
c0,0.9843693-0.4473267,1.5201969-1.4304886,1.5201969c-0.8043442,0-1.5195923-0.7152443-1.5195923-1.5201969
C67.4460144,10.8159418,77.1890945,7.151226,85.1446686,7.151226z"/>
<path d="M80.5858994,32.9836502c1.7874985,0.4473305,3.3070908,0.8946609,4.6478729,1.4304886
c2.4136429,1.2510719,4.2011414,2.5033531,5.8104401,4.7375832c0.6255417,1.0728645,1.2510757,2.3239365,1.8772125,3.8441315
c0.0897141,0.3576202,0.0897141,0.7152443,0,1.0728645c-0.1781998,0.4461174-0.4473267,0.7140312-0.8049469,0.8037415
c-0.7146378,0-1.608696,0-1.9663162-0.8037415c-2.1451263-6.0783577-5.7207413-7.151226-11.3517685-8.4920044
c-0.8049545-0.0897102-1.1625748-0.6255341-1.1625748-1.4304886v-5.0952015c0-0.8037415,0.3576202-1.3407803,1.0728683-1.4304886
c1.5195847-0.4461174,2.8603668-0.8037395,4.0223312-1.1613617c4.1120529-1.25107,5.8995514-3.5762196,7.4191437-7.3294315
c0.2685165-0.6267471,0.7152405-0.8946609,1.4304886-0.8946609c0.8934479,0.1794167,1.1625748,0.4473305,1.3407745,0.8049526
c0.0897141,0.4473305,0.0897141,0.8049526,0,1.1613617c-2.6815567,6.4359837-6.0783539,8.3138008-12.3355255,9.922493V32.9836502z"
/>
<path d="M78.440773,1.2522829c0.1788101-0.7152438,0.7146378-1.0728657,1.5195847-1.0728657
c0.9831619,0.1782048,1.2510757,0.4473304,1.4298859,0.8049524C81.5690536,1.4304876,81.5690536,1.7881097,81.47995,2.23544
v0.0897086c-0.2685165,0.7140317-0.8049545,1.0716536-1.5195923,1.0716536
C78.5298767,3.0391803,77.9934387,2.3251486,78.440773,1.2522829z"/>
<path d="M120.5359039,61.1412277V9.5648708c0-0.8049526,0.6255341-1.5189838,1.4304886-1.5189838h15.4638138
c8.2228699,0,14.8382721,4.9157858,18.2350769,12.2452164c1.3407745,3.3070946,2.0560303,6.9718094,2.0560303,10.9953594
c0,10.0994854-4.5593872,19.6643581-14.66008,22.7932453c-1.0728607,0-1.6983948-0.3576202-1.8766022-0.9831581
c-0.0897064-0.4473305-0.0897064-0.8049507,0.0897064-1.1625748c0.1782074-0.3576202,0.4461212-0.6255341,0.8934479-0.7152405
c8.9381256-2.4136467,12.6028442-11.4414787,12.6028442-19.932272c0-8.671423-3.3070984-15.7329397-11.4414825-19.2182388
c-1.9663086-0.7152443-3.9326324-1.0728664-5.898941-1.0728664h-13.9448318v50.1458702
c0,0.8037376-0.7152481,1.5189819-1.5189819,1.5189819C121.161438,62.6602097,120.5359039,61.9449654,120.5359039,61.1412277z"/>
<path d="M129.7431488,3.3968022c-0.8049469,0-1.5201874-0.6255352-1.5201874-1.4292755
c0-0.8049524,0.7152405-1.5201962,1.5201874-1.5201962h9.6533661c10.2801208,0,19.3964539,6.9718089,23.3302917,16.2687664
c1.8766022,4.4696674,2.8597565,9.5636597,2.8597565,15.1959019c0,16.7148857-8.6702118,30.6597137-26.1900482,30.6597137
h-9.5636597c-0.8049469,0-1.5201874-0.6267471-1.5201874-1.4304848v-44.782753c0-0.9831562,0.5358276-1.520196,1.5201874-1.520196
c0.9831543,0,1.4292755,0.5370398,1.4292755,1.520196v43.2625542h8.1343842
c16.6263885,0,23.2405853-13.1398773,23.2405853-27.7090302c0-11.6208954-4.7375946-23.2405777-16.3572693-27.2629128
c-2.6815643-0.8049524-5.0054932-1.2522829-6.883316-1.2522829C139.3965149,3.3968031,129.7431488,3.3968031,129.7431488,3.3968022
z"/>
<path d="M136.2676239,54.1694183c-0.2679138-0.3576241-0.2679138-0.8049545-0.0884857-1.252285
c0.0884857-0.5358276,0.3576202-0.8934479,0.8037415-0.9831581c0.4473267-0.0897064,0.8049469-0.0897064,1.2522736-0.0897064
c0.4461212,0.1794167,0.714035,0.4473305,0.8934479,0.8049507c0.1782074,0.4473305,0.1782074,0.8049545,0.0897064,1.2510719
v0.0897064c-0.2691193,0.7152443-0.8049469,1.0728683-1.5201874,1.0728683
C136.9828796,54.8834496,136.5367584,54.6155357,136.2676239,54.1694183z"/>
</g>
</svg>

(After: 8.6 KiB)

@ -2,5 +2,5 @@ title: Usage
arrange:
- commands.md
- kubeconfig.md
- multimaster.md
- multiserver.md
- guides

@ -2,59 +2,66 @@
```bash
k3d
--runtime # choose the container runtime (default: docker)
--verbose # enable verbose (debug) logging (default: false)
create
cluster [CLUSTERNAME] # default cluster name is 'k3s-default'
-a, --api-port # specify the port on which the cluster will be accessible (e.g. via kubectl)
--version # show k3d and k3s version
-h, --help # show help text
version # show k3d and k3s version
help [COMMAND] # show help text for any command
completion [bash | zsh | (psh | powershell)] # generate completion scripts for common shells
cluster [CLUSTERNAME] # default cluster name is 'k3s-default'
create
--api-port # specify the port on which the cluster will be accessible (e.g. via kubectl)
-i, --image # specify which k3s image should be used for the nodes
--k3s-agent-arg # add additional arguments to the k3s agent (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
--k3s-server-arg # add additional arguments to the k3s server (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
-m, --masters # specify how many master nodes you want to create
-s, --servers # specify how many server nodes you want to create
--network # specify a network you want to connect to
--no-image-volume # disable the creation of a volume for storing images (used for the 'k3d load image' command)
-p, --port # add some more port mappings
--secret # specify a cluster secret (default: auto-generated)
--token # specify a cluster token (default: auto-generated)
--timeout # specify a timeout, after which the cluster creation will be interrupted and changes rolled back
--update-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
--update-default-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
--switch-context # (implies --update-default-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
-v, --volume # specify additional bind-mounts
--wait # enable waiting for all master nodes to be ready before returning
-w, --workers # specify how many worker nodes you want to create
node NODENAME # Create new nodes (and add them to existing clusters)
--wait # enable waiting for all server nodes to be ready before returning
-a, --agents # specify how many agent nodes you want to create
start CLUSTERNAME # start a (stopped) cluster
-a, --all # start all clusters
--wait # wait for all servers and server-loadbalancer to be up before returning
--timeout # maximum waiting time for '--wait' before canceling/returning
stop CLUSTERNAME # stop a cluster
-a, --all # stop all clusters
delete CLUSTERNAME # delete an existing cluster
-a, --all # delete all existing clusters
list [CLUSTERNAME [CLUSTERNAME ...]]
--no-headers # do not print headers
--token # show column with cluster tokens
node
create NODENAME # Create new nodes (and add them to existing clusters)
-c, --cluster # specify the cluster that the node shall connect to
-i, --image # specify which k3s image should be used for the node(s)
--replicas # specify how many replicas you want to create with this spec
--role # specify the node role
delete
cluster CLUSTERNAME # delete an existing cluster
-a, --all # delete all existing clusters
node NODENAME # delete an existing node
--wait # wait for the node to be up and running before returning
--timeout # specify a timeout duration after which node creation will be interrupted if it has not finished yet
start NODENAME # start a (stopped) node
stop NODENAME # stop a node
delete NODENAME # delete an existing node
-a, --all # delete all existing nodes
start
cluster CLUSTERNAME # start a (stopped) cluster
-a, --all # start all clusters
node NODENAME # start a (stopped) node
stop
cluster CLUSTERNAME # stop a cluster
-a, --all # stop all clusters
node # stop a node
get
cluster [CLUSTERNAME [CLUSTERNAME ...]]
--no-headers # do not print headers
node NODENAME
list NODENAME
--no-headers # do not print headers
kubeconfig (CLUSTERNAME [CLUSTERNAME ...] | --all)
kubeconfig
get (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and save it into a file in $HOME/.k3d
-a, --all # get kubeconfigs from all clusters
merge (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and merge it/them into an existing kubeconfig
-a, --all # get kubeconfigs from all clusters
--output # specify the output file where the kubeconfig should be written to
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents
-s, --switch # switch current-context in kubeconfig to the new context
-s, --switch-context # switch current-context in kubeconfig to the new context
-u, --update # update conflicting fields in existing kubeconfig (default: true)
load
image [IMAGE [IMAGE ...]] # Load one or more images from the local runtime environment into k3d clusters
-d, --merge-default-kubeconfig # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
image
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
-c, --cluster # clusters to load the image into
-k, --keep-tarball # do not delete the image tarball from the shared volume after completion
-t, --tar # do not export image from runtime daemon, but directly import it from a tarball
completion SHELL # Generate completion scripts
version # show k3d build version
help [COMMAND] # show help text for any command
```
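As a quick orientation to the v3 command tree above, here is a plausible end-to-end session (the cluster name `demo` is just an example):
```bash
k3d cluster create demo --servers 1 --agents 2 --update-default-kubeconfig
k3d node list                           # shows servers, agents and the serverlb
k3d image import nginx:latest -c demo   # load a local image into the cluster
k3d cluster delete demo                 # also cleans up kubeconfig entries
```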

@ -7,18 +7,18 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh
1. Create a cluster, mapping the ingress port 80 to localhost:8081
`#!bash k3d create cluster --api-port 6550 -p 8081:80@loadbalancer --workers 2`
`#!bash k3d cluster create --api-port 6550 -p 8081:80@loadbalancer --agents 2`
!!! info "Good to know"
- `--api-port 6550` is not required for the example to work. It's used to have `k3s`'s API-Server listening on port 6550 with that port mapped to the host system.
- the port-mapping construct `8081:80@loadbalancer` means
- map port `8081` from the host to port `80` on the container which matches the nodefilter `loadbalancer`
- the `loadbalancer` nodefilter matches only the `masterlb` that's deployed in front of a cluster's master nodes
- all ports exposed on the `masterlb` will be proxied to the same ports on all master nodes in the cluster
- the `loadbalancer` nodefilter matches only the `serverlb` that's deployed in front of a cluster's server nodes
- all ports exposed on the `serverlb` will be proxied to the same ports on all server nodes in the cluster
2. Get the kubeconfig file
`#!bash export KUBECONFIG="$(k3d get-kubeconfig --name='k3s-default')"`
`#!bash export KUBECONFIG="$(k3d kubeconfig get k3s-default)"`
3. Create a nginx deployment
@ -54,9 +54,9 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh
## 2. via NodePort
1. Create a cluster, mapping the port 30080 from worker-0 to localhost:8082
1. Create a cluster, mapping the port 30080 from agent-0 to localhost:8082
`#!bash k3d create cluster mycluster -p 8082:30080@worker[0] --workers 2`
`#!bash k3d cluster create mycluster -p 8082:30080@agent[0] --agents 2`
- Note: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport)
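To complete the NodePort path, a minimal sketch of what one might run next (the deployment/service name `nginx` is made up; `kubectl create service nodeport` is standard kubectl):
```bash
# create something to expose
kubectl create deployment nginx --image=nginx

# NodePort service pinned to 30080, which is mapped from agent-0 to localhost:8082
kubectl create service nodeport nginx --tcp=80:80 --node-port=30080

# traffic path: localhost:8082 -> agent-0:30080 -> nginx pod
curl localhost:8082
```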

@ -2,4 +2,186 @@
## Registries configuration file
...
You can add registries by specifying them in a `registries.yaml` file and mounting it at creation time:
`#!bash k3d cluster create mycluster --volume /home/YOU/my-registries.yaml:/etc/rancher/k3s/registries.yaml`.
This file is a regular [k3s registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/), and looks like this:
```yaml
mirrors:
"my.company.registry:5000":
endpoint:
- http://my.company.registry:5000
```
In this example, an image with a name like `my.company.registry:5000/nginx:latest` would be
_pulled_ from the registry running at `http://my.company.registry:5000`.
Note well there is an important limitation: **this configuration file will only work with k3s >= v0.10.0**. It will fail silently with previous versions of k3s, but you will find an alternative solution in the [section below](#k3s-old).
This file can also be used for providing additional information necessary for accessing some registries, like [authentication](#authenticated-registries) and [certificates](#secure-registries).
### Authenticated registries
When using authenticated registries, we can add the _username_ and _password_ in a
`configs` section in the `registries.yaml`, like this:
```yaml
mirrors:
my.company.registry:
endpoint:
- http://my.company.registry
configs:
my.company.registry:
auth:
username: aladin
password: abracadabra
```
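A hedged way to check such credentials from the host before wiring them into the cluster (registry name and credentials are the example values above):
```bash
# log in with the same credentials as in the `configs` section, then try a push
docker login my.company.registry --username aladin --password abracadabra
docker tag nginx:latest my.company.registry/nginx:latest
docker push my.company.registry/nginx:latest
```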
### Secure registries
When using secure registries, the [`registries.yaml` file](#registries-file) must include information about the certificates. For example, if you want to use images from the secure registry running at `https://my.company.registry`, you must first download a CA file valid for that server and store it at some well-known path like `${HOME}/.k3d/my-company-root.pem`.
Then you have to mount the CA file at some path in the cluster's nodes and reference that mounted file in a `configs` section of the [`registries.yaml` file](#registries-file).
For example, if we mount the CA file at `/etc/ssl/certs/my-company-root.pem`, the `registries.yaml` will look like:
```yaml
mirrors:
my.company.registry:
endpoint:
- https://my.company.registry
configs:
my.company.registry:
tls:
# we will mount "my-company-root.pem" in the /etc/ssl/certs/ directory.
ca_file: "/etc/ssl/certs/my-company-root.pem"
```
Finally, we can create the cluster, mounting the CA file in the path we specified in `ca_file`:
`#!bash k3d cluster create --volume ${HOME}/.k3d/my-registries.yaml:/etc/rancher/k3s/registries.yaml --volume ${HOME}/.k3d/my-company-root.pem:/etc/ssl/certs/my-company-root.pem`
## Using a local registry
### Using the k3d registry
!!! info "Not ported yet"
The k3d-managed registry has not yet been ported from v1.x to v3.x
### Using your own local registry
You can start your own local registry with a couple of `docker` commands, like:
```bash
docker volume create local_registry
docker container run -d --name registry.localhost -v local_registry:/var/lib/registry --restart always -p 5000:5000 registry:2
```
These commands will start your registry at `registry.localhost:5000`. In order to push to this registry, you will need to make it accessible as described in the next section.
Once your registry is up and running, you will need to add it to your `registries.yaml` configuration file (see the sketch below).
Finally, you have to connect the registry network to the k3d cluster network: `#!bash docker network connect k3d-k3s-default registry.localhost`. Then you can [test your local registry](#testing-your-registry).
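Putting these steps together, a minimal sketch (the `my-registries.yaml` path is an assumption, matching the example earlier on this page):
```bash
# registries.yaml entry for the local registry started above
mkdir -p ${HOME}/.k3d
cat > ${HOME}/.k3d/my-registries.yaml <<EOF
mirrors:
  "registry.localhost:5000":
    endpoint:
      - http://registry.localhost:5000
EOF

# create the cluster with the file mounted where k3s expects it
k3d cluster create --volume ${HOME}/.k3d/my-registries.yaml:/etc/rancher/k3s/registries.yaml

# connect the registry container to the cluster network
docker network connect k3d-k3s-default registry.localhost
```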
### Pushing to your local registry address
As per the guide above, the registry will be available at `registry.localhost:5000`. All the nodes in your k3d cluster can resolve this hostname (thanks to the DNS server provided by the Docker daemon) but, in order to be able to push to this registry, this hostname also has to be resolved by your host.
Luckily (for Linux users), [NSS-myhostname](http://man7.org/linux/man-pages/man8/nss-myhostname.8.html) ships with many Linux distributions
and should resolve `*.localhost` automatically to `127.0.0.1`.
Otherwise, it's installable using `sudo apt install libnss-myhostname`.
If that's not the case, you can add an entry to your `/etc/hosts` file like this:
```bash
127.0.0.1 registry.localhost
```
Once again, this will only work with k3s >= v0.10.0 (see the sections below when using k3s <= v0.9.1).
## Testing your registry
You should test that you can
* push to your registry from your local development machine.
* use images from that registry in `Deployments` in your k3d cluster.
We will verify these two things for a local registry (located at `registry.localhost:5000`) running on your development machine. Things would be basically the same when checking an external registry, but some additional configuration could be necessary on your local machine when using an authenticated or secure registry (please refer to Docker's documentation for this).
First, we can pull an image (like `nginx`) and push it to our local registry with:
```bash
docker pull nginx:latest
docker tag nginx:latest registry.localhost:5000/nginx:latest
docker push registry.localhost:5000/nginx:latest
```
Then we can deploy a pod referencing this image to your cluster:
```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-test-registry
labels:
app: nginx-test-registry
spec:
replicas: 1
selector:
matchLabels:
app: nginx-test-registry
template:
metadata:
labels:
app: nginx-test-registry
spec:
containers:
- name: nginx-test-registry
image: registry.localhost:5000/nginx:latest
ports:
- containerPort: 80
EOF
```
Then you should check that the pod is running with `kubectl get pods -l "app=nginx-test-registry"`.
## Configuring registries for k3s <= v0.9.1
k3s servers up to v0.9.1 do not recognize the `registries.yaml` file as described in the beginning, so you will need to embed the contents of that file in a `containerd` configuration file.
You will have to create your own `containerd` configuration file at some well-known path like `${HOME}/.k3d/config.toml.tmpl`, for example:
<pre>
# Original section: no changes
[plugins.opt]
path = "{{ .NodeConfig.Containerd.Opt }}"
[plugins.cri]
stream_server_address = "{{ .NodeConfig.AgentConfig.NodeName }}"
stream_server_port = "10010"
{{- if .IsRunningInUserNS }}
disable_cgroup = true
disable_apparmor = true
restrict_oom_score_adj = true
{{ end -}}
{{- if .NodeConfig.AgentConfig.PauseImage }}
sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
{{ end -}}
{{- if not .NodeConfig.NoFlannel }}
[plugins.cri.cni]
bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}"
conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}"
{{ end -}}
# Added section: additional registries and the endpoints
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."<b>registry.localhost:5000</b>"]
endpoint = ["http://<b>registry.localhost:5000</b>"]
</pre>
and then mount it at `/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl` (where `containerd` in your k3d nodes will load it) when creating the k3d cluster:
```bash
k3d cluster create mycluster \
--volume ${HOME}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
```
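If you want to double-check that the template actually landed in a node, something like the following should work (the node name assumes the default `k3d-<cluster>-<role>-<n>` naming):
```bash
# print the mounted template from inside a server node's container
docker exec k3d-mycluster-server-0 \
  cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
```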

@ -11,28 +11,34 @@ To get a kubeconfig set up for you to connect to a k3d cluster, you can go diffe
## Getting the kubeconfig for a newly created cluster
1. Update your default kubeconfig **upon** cluster creation
- `#!bash k3d create cluster mycluster --update-kubeconfig`
- *Note:* this won't switch the current-context
2. Update your default kubeconfig **after** cluster creation
- `#!bash k3d get kubeconfig mycluster`
- *Note:* this won't switch the current-context
3. Update a different kubeconfig **after** cluster creation
- `#!bash k3d get kubeconfig mycluster --output some/other/file.yaml`
1. Create a new kubeconfig file **after** cluster creation
- `#!bash k3d kubeconfig get mycluster`
- *Note:* this will create (or update) the file `$HOME/.k3d/kubeconfig-mycluster.yaml`
- *Tip:* Use it: `#!bash export KUBECONFIG=$(k3d kubeconfig get mycluster)`
2. Update your default kubeconfig **upon** cluster creation
- `#!bash k3d cluster create mycluster --update-kubeconfig`
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
3. Update your default kubeconfig **after** cluster creation
- `#!bash k3d kubeconfig merge mycluster --merge-default-kubeconfig`
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
4. Update a different kubeconfig **after** cluster creation
- `#!bash k3d kubeconfig merge mycluster --output some/other/file.yaml`
- *Note:* this won't switch the current-context
- The file will be created if it doesn't exist
!!! info "Switching the current context"
None of the above options switch the current-context.
None of the above options switch the current-context by default.
This is intended to be the least intrusive option, since the current-context has a global effect.
You can switch the current-context directly with the `get kubeconfig` command by adding the `--switch` flag.
You can switch the current-context directly with the `kubeconfig merge` command by adding the `--switch-context` flag.
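For example, merging into the default kubeconfig and switching in one go (flag names as listed in the commands overview):
```bash
k3d kubeconfig merge mycluster --merge-default-kubeconfig --switch-context
kubectl config current-context   # should now be the new cluster's context
```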
## Removing cluster details from the kubeconfig
`#!bash k3d delete cluster mycluster` will always remove the details for `mycluster` from the default kubeconfig.
`#!bash k3d cluster delete mycluster` will always remove the details for `mycluster` from the default kubeconfig.
It will also delete the respective kubeconfig file in `$HOME/.k3d/` if it exists.
## Handling multiple clusters
`k3d get kubeconfig` lets you specify one or more clusters via arguments _or_ all via `--all`.
All kubeconfigs will then be merged into a single file, which is either the default kubeconfig or the kubeconfig specified via `--output FILE`.
Note that with multiple clusters specified, the `--switch` flag will change the current context to the cluster which was last in the list.
`k3d kubeconfig merge` lets you specify one or more clusters via arguments _or_ all via `--all`.
All kubeconfigs will then be merged into a single file if `--merge-default-kubeconfig` or `--output` is specified.
If neither of those two flags was specified, a new file will be created per cluster and the merged path (e.g. `$HOME/.k3d/kubeconfig-cluster1.yaml:$HOME/.k3d/cluster2.yaml`) will be returned.
Note that with multiple clusters specified, the `--switch-context` flag will change the current context to the cluster that was last in the list.
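A sketch of that multi-cluster behavior (cluster names are placeholders):
```bash
# merge two clusters into one dedicated file and switch context
k3d kubeconfig merge cluster1 cluster2 --output ~/.k3d/merged.yaml --switch-context

export KUBECONFIG=~/.k3d/merged.yaml
kubectl config current-context   # context of the last listed cluster (cluster2)
```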

@ -1,25 +0,0 @@
# Creating multi-master clusters
!!! info "Important note"
For the best results (and fewer unexpected issues), choose 1, 3, 5, ... master nodes.
## Embedded dqlite
Create a cluster with 3 master nodes using k3s' embedded dqlite database.
The first master to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other master nodes.
```bash
k3d create cluster multimaster --masters 3
```
## Adding master nodes to a running cluster
In theory (and also in practice in most cases), this is as easy as executing the following command:
```bash
k3d create node newmaster --cluster multimaster --role master
```
!!! important "There's a trap!"
If your cluster was initially created with only a single master node, then this will fail.
That's because the initial master node was not started with the `--cluster-init` flag and thus is not using the dqlite backend.

@ -0,0 +1,25 @@
# Creating multi-server clusters
!!! info "Important note"
For the best results (and fewer unexpected issues), choose 1, 3, 5, ... server nodes.
## Embedded dqlite
Create a cluster with 3 server nodes using k3s' embedded dqlite database.
The first server to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other server nodes.
```bash
k3d cluster create multiserver --servers 3
```
## Adding server nodes to a running cluster
In theory (and also in practice in most cases), this is as easy as executing the following command:
```bash
k3d node create newserver --cluster multiserver --role server
```
!!! important "There's a trap!"
If your cluster was initially created with only a single server node, then this will fail.
That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the dqlite backend.
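To make the trap concrete, a hedged before/after (cluster names are invented):
```bash
# works: the cluster started with multiple servers, so server-0 ran with --cluster-init
k3d cluster create multiserver --servers 3
k3d node create newserver --cluster multiserver --role server

# fails (per the note above): a single-server cluster was not started with --cluster-init,
# so it is not using the dqlite backend and new servers cannot join it
k3d cluster create solo
k3d node create another --cluster solo --role server
```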

@ -1,47 +1,44 @@
module github.com/rancher/k3d
module github.com/rancher/k3d/v3
go 1.14
require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/Microsoft/hcsshim v0.8.6 // indirect
github.com/Microsoft/hcsshim v0.8.9 // indirect
github.com/containerd/cgroups v0.0.0-20190923161937-abd0b19954a6 // indirect
github.com/containerd/containerd v1.3.0-rc.2.0.20190924150618-aba201344ebf
github.com/containerd/containerd v1.3.4
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8 // indirect
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd // indirect
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c // indirect
github.com/docker/docker v1.4.2-0.20190905191220-3b23f9033967
github.com/docker/distribution v0.0.0-20200319173657-742aab907b54 // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20200528204242-89382f2f2074+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/go-test/deep v1.0.4
github.com/gogo/googleapis v1.3.0 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/protobuf v1.4.0 // indirect
github.com/golang/protobuf v1.4.2 // indirect
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5
github.com/imdario/mergo v0.3.9
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mitchellh/go-homedir v1.1.0
github.com/moby/sys/mount v0.1.0 // indirect
github.com/moby/term v0.0.0-20200507201656-73f35e472e8f // indirect
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/opencontainers/runtime-spec v1.0.1 // indirect
github.com/sirupsen/logrus v1.5.0
github.com/spf13/cobra v1.0.0
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.1-0.20200629195214-2c5a0d300f8b
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
go.etcd.io/bbolt v1.3.3 // indirect
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 // indirect
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
google.golang.org/grpc v1.23.0 // indirect
google.golang.org/genproto v0.0.0-20200604104852-0b0486081ffb // indirect
google.golang.org/grpc v1.29.1 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
gotest.tools v2.2.0+incompatible // indirect
gotest.tools/v3 v3.0.2 // indirect
k8s.io/client-go v0.17.0
k8s.io/utils v0.0.0-20200109141947-94aeca20bf09 // indirect
)

129  go.sum

@ -14,10 +14,10 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
@ -29,27 +29,37 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20190923161937-abd0b19954a6 h1:jHSIafTFvTm7oBNnA8ZOzfmYylo9J3YzJHKOfUkeL94=
github.com/containerd/cgroups v0.0.0-20190923161937-abd0b19954a6/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/containerd v1.3.0-rc.2.0.20190924150618-aba201344ebf h1:eUqECIgzaUkOpYd1r7PF0NLxT0N+E6Xax31gYyzZd4A=
github.com/containerd/containerd v1.3.0-rc.2.0.20190924150618-aba201344ebf/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI=
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8 h1:jYCTS/16RWXXtVHNHo1KWNegd1kKQ7lHd7BStj/0hKw=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd h1:bRLyitWw3PT/2YuVaCKTPg0cA5dOFKFwKtkfcP2dLsA=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@ -59,6 +69,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -70,24 +82,27 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c h1:6L6qod4JzOm9KEqmfSyO6ZhsnN9dlcISRt+xdoyZeGE=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/docker v1.4.2-0.20190905191220-3b23f9033967 h1:hqs6DQFz659/085bXwClBRGefVf+kWCTsQR6wwkOMiU=
github.com/docker/docker v1.4.2-0.20190905191220-3b23f9033967/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/distribution v0.0.0-20200319173657-742aab907b54 h1:wxX61VZypmXacwrj9aLDwAxAuT5kq1aq3NFc5IfbfZs=
github.com/docker/distribution v0.0.0-20200319173657-742aab907b54/go.mod h1:Oqz4IonmMNc2N7GqfTL2xkhCQx0yS6nR+HrOZJnmKIk=
github.com/docker/docker v17.12.0-ce-rc1.0.20200528204242-89382f2f2074+incompatible h1:oQeenT4rlzuBqBKczNk1n1aHdBxYVmv/uWZySvk3Boo=
github.com/docker/docker v17.12.0-ce-rc1.0.20200528204242-89382f2f2074+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@ -148,12 +163,16 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
@ -201,6 +220,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
@ -218,6 +238,8 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@ -233,6 +255,8 @@ github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM52
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -259,6 +283,12 @@ github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/sys/mount v0.1.0 h1:Ytx78EatgFKtrqZ0BvJ0UtJE472ZvawVmil6pIfuCCU=
github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo=
github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/term v0.0.0-20200507201656-73f35e472e8f h1:FQQ9Wo/j3IZrVSv8RkGZoeYMuec0xAoSNijF1UqEgB4=
github.com/moby/term v0.0.0-20200507201656-73f35e472e8f/go.mod h1:uF4OSdW39LLr+K/v/iL6dOm257SGdQJGiyMU1QlNd6s=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -282,12 +312,14 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@ -300,24 +332,29 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083 h1:BVsJT8+ZbyuL3hypz/HmEiM8h2P6hBQGig4el9/MdjA=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@ -336,8 +373,8 @@ github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@ -352,8 +389,8 @@ github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.1-0.20200629195214-2c5a0d300f8b h1:grM+VdcoRu+xbzmCXM1KuH5UQGk9Lc8yCiwZZ2PKVdU=
github.com/spf13/cobra v1.0.1-0.20200629195214-2c5a0d300f8b/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@ -378,6 +415,7 @@ github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiff
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
@ -391,6 +429,8 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@ -400,8 +440,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -420,11 +460,13 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -449,13 +491,16 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -484,6 +529,7 @@ golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@ -500,17 +546,34 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCPHgCA/vChHcpsX27MZ3yBonD/z1KE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200604104852-0b0486081ffb h1:ek2py5bOqzR7MR/6obzk0rXUgYCLmjyLnaO9ssT+l6w=
google.golang.org/genproto v0.0.0-20200604104852-0b0486081ffb/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -536,6 +599,8 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@ -62,7 +62,7 @@ verifySupported() {
# if it needs to be changed.
checkK3dInstalledVersion() {
if [[ -f "${K3D_INSTALL_DIR}/${APP_NAME}" ]]; then
local version=$(k3d --version | cut -d " " -f3)
local version=$(k3d version | grep 'k3d version' | cut -d " " -f3)
if [[ "$version" == "$TAG" ]]; then
echo "k3d ${version} is already ${DESIRED_VERSION:-latest}"
return 0

@ -21,7 +21,7 @@ THE SOFTWARE.
*/
package main
import "github.com/rancher/k3d/cmd"
import "github.com/rancher/k3d/v3/cmd"
func main() {
cmd.Execute()
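The new import path carries a /v3 suffix because Go modules use semantic import versioning: from v2 on, a module's import path must include its major version. A minimal sketch of an external consumer of the versioned module (runtimes.SelectedRuntime is an assumed runtime handle, not something this diff confirms):

    package main

    import (
        "context"
        "fmt"

        k3dcluster "github.com/rancher/k3d/v3/pkg/cluster"
        "github.com/rancher/k3d/v3/pkg/runtimes"
    )

    func main() {
        // List clusters through the versioned import path.
        // runtimes.SelectedRuntime is an assumption for whatever runtime handle a consumer holds.
        clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
        if err != nil {
            fmt.Println(err)
            return
        }
        for _, c := range clusters {
            fmt.Println(c.Name)
        }
    }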

@ -11,6 +11,7 @@ extra_javascript:
extra_css:
- static/css/asciinema-player.css
- static/css/extra.css
# Repo Information
repo_name: rancher/k3d
@ -26,7 +27,7 @@ theme:
primary: "black"
accent: "grey"
logo: static/img/k3d_logo_black_green.svg
favicon: static/img/favicons_black_blue/favicon.ico
favicon: static/img/favicons_black_blue/favicon.png
# Navigation
# nav: omitted, because we're using the awesome-pages plugin (https://squidfunk.github.io/mkdocs-material/plugins/awesome-pages/)

@ -25,21 +25,25 @@ import (
"bytes"
"context"
"fmt"
"sort"
"strconv"
"strings"
"time"
k3drt "github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/pkg/util"
"github.com/imdario/mergo"
k3drt "github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/pkg/util"
"github.com/rancher/k3d/v3/version"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// CreateCluster creates a new cluster consisting of
// ClusterCreate creates a new cluster consisting of
// - some containerized k3s nodes
// - a docker network
func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runtime) error {
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
if cluster.CreateClusterOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
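Since the derived context is threaded through every runtime call that follows, one deadline bounds the whole creation flow, and a caller can impose its own deadline the same way. A sketch, with the runtime and cluster handles illustrative:

    // Bound an entire cluster-create call with one deadline.
    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
    defer cancel()
    if err := ClusterCreate(ctx, runtime, cluster); err != nil {
        log.Fatalln(err) // on timeout the error wraps context.DeadlineExceeded
    }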
@ -65,31 +69,31 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
if cluster.Network.Name == "host" {
useHostNet = true
if len(cluster.Nodes) > 1 {
return fmt.Errorf("Only one master node supported when using host network")
return fmt.Errorf("Only one server node supported when using host network")
}
}
// create cluster network or use an existing one
networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(cluster.Network.Name)
networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, cluster.Network.Name)
if err != nil {
log.Errorln("Failed to create cluster network")
return err
}
cluster.Network.Name = networkID
extraLabels := map[string]string{
"k3d.cluster.network": networkID,
"k3d.cluster.network.external": strconv.FormatBool(cluster.Network.External),
k3d.LabelNetwork: networkID,
k3d.LabelNetworkExternal: strconv.FormatBool(cluster.Network.External),
}
if networkExists {
extraLabels["k3d.cluster.network.external"] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
extraLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
}
/*
* Cluster Secret
* Cluster Token
*/
if cluster.Secret == "" {
cluster.Secret = GenerateClusterSecret()
if cluster.Token == "" {
cluster.Token = GenerateClusterToken()
}
/*
@ -98,12 +102,12 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
*/
if !cluster.CreateClusterOpts.DisableImageVolume {
imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
if err := runtime.CreateVolume(imageVolumeName, map[string]string{"k3d.cluster": cluster.Name}); err != nil {
log.Errorln("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
return err
}
extraLabels["k3d.cluster.imageVolume"] = imageVolumeName
extraLabels[k3d.LabelImageVolume] = imageVolumeName
// attach volume to nodes
for _, node := range cluster.Nodes {
@ -115,19 +119,19 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
* Nodes
*/
// Worker defaults (per cluster)
// connection url is always the name of the first master node (index 0)
connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.MasterRole, 0), k3d.DefaultAPIPort)
// agent defaults (per cluster)
// connection url is always the name of the first server node (index 0)
connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
nodeSetup := func(node *k3d.Node, suffix int) error {
// cluster specific settings
if node.Labels == nil {
node.Labels = make(map[string]string) // TODO: maybe create an init function?
}
node.Labels["k3d.cluster"] = cluster.Name
node.Env = append(node.Env, fmt.Sprintf("K3S_TOKEN=%s", cluster.Secret))
node.Labels["k3d.cluster.secret"] = cluster.Secret
node.Labels["k3d.cluster.url"] = connectionURL
node.Labels[k3d.LabelClusterName] = cluster.Name
node.Env = append(node.Env, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
node.Labels[k3d.LabelClusterToken] = cluster.Token
node.Labels[k3d.LabelClusterURL] = connectionURL
// append extra labels
for k, v := range extraLabels {
@ -135,16 +139,16 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
}
// node role specific settings
if node.Role == k3d.MasterRole {
if node.Role == k3d.ServerRole {
node.MasterOpts.ExposeAPI = cluster.ExposeAPI
node.ServerOpts.ExposeAPI = cluster.ExposeAPI
// the cluster has an init master node, but it's not this one, so connect it to the init node
if cluster.InitNode != nil && !node.MasterOpts.IsInit {
// the cluster has an init server node, but it's not this one, so connect it to the init node
if cluster.InitNode != nil && !node.ServerOpts.IsInit {
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
}
} else if node.Role == k3d.WorkerRole {
} else if node.Role == k3d.AgentRole {
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
}
@ -153,7 +157,7 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
// create node
log.Infof("Creating node '%s'", node.Name)
if err := CreateNode(node, runtime); err != nil {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create node")
return err
}
@ -163,41 +167,41 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
}
// used for node suffixes
masterCount := 0
workerCount := 0
serverCount := 0
agentCount := 0
suffix := 0
// create init node first
if cluster.InitNode != nil {
log.Infoln("Creating initializing master node")
log.Infoln("Creating initializing server node")
cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")
// in case the LoadBalancer was disabled, expose the API Port on the initializing master node
// in case the LoadBalancer was disabled, expose the API Port on the initializing server node
if cluster.CreateClusterOpts.DisableLoadBalancer {
cluster.InitNode.Ports = append(cluster.InitNode.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
}
if err := nodeSetup(cluster.InitNode, masterCount); err != nil {
if err := nodeSetup(cluster.InitNode, serverCount); err != nil {
return err
}
masterCount++
serverCount++
// wait for the initnode to come up before doing anything else
for {
select {
case <-ctx.Done():
log.Errorln("Failed to bring up initializing master node in time")
log.Errorln("Failed to bring up initializing server node in time")
return fmt.Errorf(">>> %w", ctx.Err())
default:
}
log.Debugln("Waiting for initializing master node...")
logreader, err := runtime.GetNodeLogs(cluster.InitNode)
log.Debugln("Waiting for initializing server node...")
logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
if err != nil {
if logreader != nil {
logreader.Close()
}
log.Errorln(err)
log.Errorln("Failed to get logs from the initializig master node.. waiting for 3 seconds instead")
log.Errorln("Failed to get logs from the initializig server node.. waiting for 3 seconds instead")
time.Sleep(3 * time.Second)
break
}
@ -206,7 +210,7 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
nRead, _ := buf.ReadFrom(logreader)
logreader.Close()
if nRead > 0 && strings.Contains(buf.String(), "Running kubelet") {
log.Debugln("Initializing master node is up... continuing")
log.Debugln("Initializing server node is up... continuing")
break
}
time.Sleep(time.Second)
@ -214,66 +218,60 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
}
// vars to support waiting for master nodes to be ready
waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
// vars to support waiting for server nodes to be ready
waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
// create all other nodes, but skip the init node
for _, node := range cluster.Nodes {
if node.Role == k3d.MasterRole {
if node.Role == k3d.ServerRole {
// skip the init node here
if node == cluster.InitNode {
continue
} else if masterCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
// if this is the first master node and the master loadbalancer is disabled, expose the API Port on this master node
} else if serverCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
// if this is the first server node and the server loadbalancer is disabled, expose the API Port on this server node
node.Ports = append(node.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
}
time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of masters registering
time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering
// name suffix
suffix = masterCount
masterCount++
suffix = serverCount
serverCount++
} else if node.Role == k3d.WorkerRole {
} else if node.Role == k3d.AgentRole {
// name suffix
suffix = workerCount
workerCount++
suffix = agentCount
agentCount++
}
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if err := nodeSetup(node, suffix); err != nil {
return err
}
}
// asynchronously wait for this master node to be ready (by checking the logs for a specific log message)
if node.Role == k3d.MasterRole && cluster.CreateClusterOpts.WaitForMaster {
masterNode := node
waitForMasterWaitgroup.Go(func() error {
// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
if node.Role == k3d.ServerRole && cluster.CreateClusterOpts.WaitForServer {
serverNode := node
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig")
log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
})
}
}
if err := waitForMasterWaitgroup.Wait(); err != nil {
log.Errorln("Failed to bring up all master nodes in time. Check the logs:")
log.Errorln(">>> ", err)
return fmt.Errorf("Failed to bring up cluster")
}
/*
* Auxiliary Containers
*/
// *** MasterLoadBalancer ***
// *** ServerLoadBalancer ***
if !cluster.CreateClusterOpts.DisableLoadBalancer {
if !useHostNet { // masterlb not supported in hostnetwork mode due to port collisions with master node
// Generate a comma-separated list of master/server names to pass to the LB container
if !useHostNet { // serverlb not supported in hostnetwork mode due to port collisions with server node
// Generate a comma-separated list of server node names to pass to the LB container
servers := ""
for _, node := range cluster.Nodes {
if node.Role == k3d.MasterRole {
if node.Role == k3d.ServerRole {
log.Debugf("Node NAME: %s", node.Name)
if servers == "" {
servers = node.Name
@ -285,16 +283,16 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
// generate comma-separated list of extra ports to forward
ports := k3d.DefaultAPIPort
for _, portString := range cluster.MasterLoadBalancer.Ports {
for _, portString := range cluster.ServerLoadBalancer.Ports {
split := strings.Split(portString, ":")
ports += "," + split[len(split)-1]
}
// Create LB as a modified node with loadbalancerRole
lbNode := &k3d.Node{
Name: fmt.Sprintf("%s-%s-masterlb", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: k3d.DefaultLBImage,
Ports: append(cluster.MasterLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()),
Ports: append(cluster.ServerLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
Env: []string{
fmt.Sprintf("SERVERS=%s", servers),
fmt.Sprintf("PORTS=%s", ports),
@ -305,26 +303,41 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
}
cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
if err := CreateNode(lbNode, runtime); err != nil {
if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create loadbalancer")
return err
}
if cluster.CreateClusterOpts.WaitForServer {
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
return NodeWaitForLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
})
}
} else {
log.Infoln("Hostnetwork selected -> Skipping creation of Master LoadBalancer")
log.Infoln("Hostnetwork selected -> Skipping creation of server LoadBalancer")
}
}
if err := waitForServerWaitgroup.Wait(); err != nil {
log.Errorln("Failed to bring up all server nodes (and loadbalancer) in time. Check the logs:")
log.Errorf(">>> %+v", err)
return fmt.Errorf("Failed to bring up cluster")
}
return nil
}
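The readiness gate above leans on errgroup semantics: errgroup.WithContext cancels the shared context as soon as any goroutine errors, and Wait() returns the first failure. Condensed to its core (the nodes slice here is illustrative):

    // One log-tailing goroutine per server node; the first failure cancels the rest.
    waitGroup, ctx := errgroup.WithContext(ctx)
    for _, node := range nodes {
        node := node // capture the loop variable for the closure
        waitGroup.Go(func() error {
            return NodeWaitForLogMessage(ctx, runtime, node, k3d.ReadyLogMessageByRole[node.Role], time.Time{})
        })
    }
    if err := waitGroup.Wait(); err != nil {
        return fmt.Errorf("Failed to bring up cluster")
    }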
// DeleteCluster deletes an existing cluster
func DeleteCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
// ClusterDelete deletes an existing cluster
func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
log.Infof("Deleting cluster '%s'", cluster.Name)
log.Debugf("%+v", cluster)
log.Debugf("Cluster Details: %+v", cluster)
failed := 0
for _, node := range cluster.Nodes {
if err := runtime.DeleteNode(node); err != nil {
if err := runtime.DeleteNode(ctx, node); err != nil {
log.Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
failed++
continue
@ -335,7 +348,7 @@ func DeleteCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
if cluster.Network.Name != "" {
if !cluster.Network.External {
log.Infof("Deleting cluster network '%s'", cluster.Network.Name)
if err := runtime.DeleteNetwork(cluster.Network.Name); err != nil {
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
if strings.HasSuffix(err.Error(), "active endpoints") {
log.Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
} else {
@ -350,7 +363,7 @@ func DeleteCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
// delete image volume
if cluster.ImageVolume != "" {
log.Infof("Deleting image volume '%s'", cluster.ImageVolume)
if err := runtime.DeleteVolume(cluster.ImageVolume); err != nil {
if err := runtime.DeleteVolume(ctx, cluster.ImageVolume); err != nil {
log.Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
}
}
@ -362,9 +375,9 @@ func DeleteCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
return nil
}
// GetClusters returns a list of all existing clusters
func GetClusters(runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
nodes, err := runtime.GetNodesByLabel(k3d.DefaultObjectLabels)
// ClusterList returns a list of all existing clusters
func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
if err != nil {
log.Errorln("Failed to get clusters")
return nil, err
@ -375,7 +388,7 @@ func GetClusters(runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
for _, node := range nodes {
clusterExists := false
for _, cluster := range clusters {
if node.Labels["k3d.cluster"] == cluster.Name { // TODO: handle case, where this label doesn't exist
if node.Labels[k3d.LabelClusterName] == cluster.Name { // TODO: handle case, where this label doesn't exist
cluster.Nodes = append(cluster.Nodes, node)
clusterExists = true
break
@ -384,7 +397,7 @@ func GetClusters(runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
// cluster is not in the list yet, so we add it with the current node as its first member
if !clusterExists {
clusters = append(clusters, &k3d.Cluster{
Name: node.Labels["k3d.cluster"],
Name: node.Labels[k3d.LabelClusterName],
Nodes: []*k3d.Node{node},
})
}
@ -408,7 +421,7 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error {
// get the name of the cluster network
if cluster.Network.Name == "" {
if networkName, ok := node.Labels["k3d.cluster.network"]; ok {
if networkName, ok := node.Labels[k3d.LabelNetwork]; ok {
cluster.Network.Name = networkName
}
}
@ -416,7 +429,7 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error {
// check if the network is external
// since the struct value is a bool, initialized as false, we cannot check if it's unset
if !cluster.Network.External && !networkExternalSet {
if networkExternalString, ok := node.Labels["k3d.cluster.network.external"]; ok {
if networkExternalString, ok := node.Labels[k3d.LabelNetworkExternal]; ok {
if networkExternal, err := strconv.ParseBool(networkExternalString); err == nil {
cluster.Network.External = networkExternal
networkExternalSet = true
@ -426,20 +439,26 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error {
// get image volume // TODO: enable external image volumes the same way we do it with networks
if cluster.ImageVolume == "" {
if imageVolumeName, ok := node.Labels["k3d.cluster.imageVolume"]; ok {
if imageVolumeName, ok := node.Labels[k3d.LabelImageVolume]; ok {
cluster.ImageVolume = imageVolumeName
}
}
// get k3s cluster's token
if cluster.Token == "" {
if token, ok := node.Labels[k3d.LabelClusterToken]; ok {
cluster.Token = token
}
}
}
return nil
}
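This function is the read side of the extraLabels written during ClusterCreate: all cluster state k3d needs later is persisted as container labels, so recovering it is one map lookup per key. The round trip, reduced to a single label using the k3d.Label* constants this commit introduces:

    node.Labels[k3d.LabelClusterToken] = cluster.Token // write side, in ClusterCreate
    cluster.Token = node.Labels[k3d.LabelClusterToken] // read side, here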
// GetCluster returns an existing cluster with all fields and node lists populated
func GetCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) (*k3d.Cluster, error) {
// ClusterGet returns an existing cluster with all fields and node lists populated
func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) (*k3d.Cluster, error) {
// get nodes that belong to the selected cluster
nodes, err := runtime.GetNodesByLabel(map[string]string{"k3d.cluster": cluster.Name})
nodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
if err != nil {
log.Errorf("Failed to get nodes for cluster '%s'", cluster.Name)
}
@ -450,7 +469,22 @@ func GetCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) (*k3d.Cluster, erro
// append nodes
for _, node := range nodes {
cluster.Nodes = append(cluster.Nodes, node)
// check if there's already a node in the struct
overwroteExisting := false
for _, existingNode := range cluster.Nodes {
// overwrite existing node
if existingNode.Name == node.Name {
mergo.MergeWithOverwrite(existingNode, node)
overwroteExisting = true
}
}
// no existing node overwritten: append new node
if !overwroteExisting {
cluster.Nodes = append(cluster.Nodes, node)
}
}
if err := populateClusterFieldsFromLabels(cluster); err != nil {
@ -461,8 +495,8 @@ func GetCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) (*k3d.Cluster, erro
return cluster, nil
}
// GenerateClusterSecret generates a random 20-character string
func GenerateClusterSecret() string {
// GenerateClusterToken generates a random 20-character string
func GenerateClusterToken() string {
return util.GenerateRandomString(20)
}
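util.GenerateRandomString itself is not part of this diff; a stand-in with the same shape might look as follows. The charset and the use of math/rand are assumptions, and a real implementation could well prefer crypto/rand for token material:

    package util

    import "math/rand"

    const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

    // GenerateRandomString returns a random string of length n drawn from charset.
    // Assumed shape only; the real helper may seed differently or use crypto/rand.
    func GenerateRandomString(n int) string {
        b := make([]byte, n)
        for i := range b {
            b[i] = charset[rand.Intn(len(charset))]
        }
        return string(b)
    }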
@ -470,35 +504,69 @@ func generateNodeName(cluster string, role k3d.Role, suffix int) string {
return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix)
}
// StartCluster starts a whole cluster (i.e. all nodes of the cluster)
func StartCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.ClusterStartOpts) error {
log.Infof("Starting cluster '%s'", cluster.Name)
start := time.Now()
if startClusterOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, startClusterOpts.Timeout)
defer cancel()
}
// vars to support waiting for server nodes to be ready
waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
failed := 0
var masterlb *k3d.Node
var serverlb *k3d.Node
for _, node := range cluster.Nodes {
// skip the LB, because we want to start it last
if node.Role == k3d.LoadBalancerRole {
masterlb = node
serverlb = node
continue
}
// start node
if err := runtime.StartNode(node); err != nil {
if err := runtime.StartNode(ctx, node); err != nil {
log.Warningf("Failed to start node '%s': Try to start it manually", node.Name)
failed++
continue
}
// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
if node.Role == k3d.ServerRole && startClusterOpts.WaitForServer {
serverNode := node
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], start)
})
}
}
// start masterlb
if masterlb != nil {
log.Debugln("Starting masterlb...")
if err := runtime.StartNode(masterlb); err != nil { // FIXME: we could hit a nil pointer dereference here
log.Warningf("Failed to start masterlb '%s': Try to start it manually", masterlb.Name)
// start serverlb
if serverlb != nil {
log.Debugln("Starting serverlb...")
if err := runtime.StartNode(ctx, serverlb); err != nil { // FIXME: we could hit a nil pointer dereference here
log.Warningf("Failed to start serverlb '%s': Try to start it manually", serverlb.Name)
failed++
}
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", serverlb.Name)
return NodeWaitForLogMessage(ctx, runtime, serverlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
})
}
if err := waitForServerWaitgroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorln(">>> ", err)
return fmt.Errorf("Failed to bring up cluster")
}
if failed > 0 {
@ -507,13 +575,13 @@ func StartCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
return nil
}
// StopCluster stops a whole cluster (i.e. all nodes of the cluster)
func StopCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
// ClusterStop stops a whole cluster (i.e. all nodes of the cluster)
func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
log.Infof("Stopping cluster '%s'", cluster.Name)
failed := 0
for _, node := range cluster.Nodes {
if err := runtime.StopNode(node); err != nil {
if err := runtime.StopNode(ctx, node); err != nil {
log.Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
failed++
continue
@ -525,3 +593,11 @@ func StopCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
}
return nil
}
// SortClusters sorts the given cluster list in place, alphabetically by cluster name
func SortClusters(clusters []*k3d.Cluster) []*k3d.Cluster {
sort.Slice(clusters, func(i, j int) bool {
return clusters[i].Name < clusters[j].Name
})
return clusters
}
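Because sort.Slice sorts in place and the slice is returned as well, callers can chain the helper directly. A usage sketch with an assumed runtime handle:

    clusters, err := ClusterList(ctx, runtime)
    if err != nil {
        return err
    }
    for _, c := range SortClusters(clusters) {
        log.Infof("%s (%d nodes)", c.Name, len(c.Nodes))
    }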

@ -24,7 +24,7 @@ package cluster
import (
"fmt"
"github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/pkg/types"
)
// CheckName ensures that a cluster name is also a valid host name according to RFC 1123.
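RFC 1123 host-name labels, as Kubernetes applies them, allow 1 to 63 characters of lowercase alphanumerics and hyphens, beginning and ending with an alphanumeric. The actual CheckName body sits outside this hunk, so the validator below is only a sketch under those assumptions:

    // Illustrative only; not the verbatim CheckName implementation.
    var rfc1123Label = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`)

    // isValidClusterName reports whether name is a valid RFC 1123 host-name label.
    func isValidClusterName(name string) bool {
        return rfc1123Label.MatchString(name)
    }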

@ -23,14 +23,15 @@ package cluster
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"time"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -43,21 +44,21 @@ type WriteKubeConfigOptions struct {
OverwriteExisting bool
}
// GetAndWriteKubeConfig ...
// 1. fetches the KubeConfig from the first master node retrieved for a given cluster
// KubeconfigGetWrite ...
// 1. fetches the KubeConfig from the first server node retrieved for a given cluster
// 2. modifies it by updating some fields with cluster-specific information
// 3. writes it to the specified output
func GetAndWriteKubeConfig(runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {
func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {
// get kubeconfig from cluster node
kubeconfig, err := GetKubeconfig(runtime, cluster)
kubeconfig, err := KubeconfigGet(ctx, runtime, cluster)
if err != nil {
return output, err
}
// empty output parameter = write to default
if output == "" {
output, err = GetDefaultKubeConfigPath()
output, err = KubeconfigGetDefaultPath()
if err != nil {
return output, err
}
@ -65,7 +66,7 @@ func GetAndWriteKubeConfig(runtime runtimes.Runtime, cluster *k3d.Cluster, outpu
// simply write to the output, ignoring existing contents
if writeKubeConfigOptions.OverwriteExisting || output == "-" {
return output, WriteKubeConfigToPath(kubeconfig, output)
return output, KubeconfigWriteToPath(ctx, kubeconfig, output)
}
// load config from existing file or fail if it has non-kubeconfig contents
@ -102,49 +103,49 @@ func GetAndWriteKubeConfig(runtime runtimes.Runtime, cluster *k3d.Cluster, outpu
}
// update the existing kubeconfig, erroring out on conflicting fields unless we've been told to update them
return output, UpdateKubeConfig(kubeconfig, existingKubeConfig, output, writeKubeConfigOptions.UpdateExisting, writeKubeConfigOptions.UpdateCurrentContext)
return output, KubeconfigMerge(ctx, kubeconfig, existingKubeConfig, output, writeKubeConfigOptions.UpdateExisting, writeKubeConfigOptions.UpdateCurrentContext)
}
// GetKubeconfig grabs the kubeconfig file from /output from a master node container,
// KubeconfigGet grabs the kubeconfig file from /output from a server node container,
// modifies it by updating some fields with cluster-specific information
// and returns a Config object for further processing
func GetKubeconfig(runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
// get all master nodes for the selected cluster
// TODO: getKubeconfig: we should make sure that the master node we're trying to fetch from is actually running
masterNodes, err := runtime.GetNodesByLabel(map[string]string{"k3d.cluster": cluster.Name, "k3d.role": string(k3d.MasterRole)})
func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
// get all server nodes for the selected cluster
// TODO: getKubeconfig: we should make sure that the server node we're trying to fetch from is actually running
serverNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.ServerRole)})
if err != nil {
log.Errorln("Failed to get master nodes")
log.Errorln("Failed to get server nodes")
return nil, err
}
if len(masterNodes) == 0 {
return nil, fmt.Errorf("Didn't find any master node")
if len(serverNodes) == 0 {
return nil, fmt.Errorf("Didn't find any server node")
}
// prefer a master node that actually has the port exposed
var chosenMaster *k3d.Node
chosenMaster = nil
// prefer a server node that actually has the port exposed
var chosenServer *k3d.Node
chosenServer = nil
APIPort := k3d.DefaultAPIPort
APIHost := k3d.DefaultAPIHost
for _, master := range masterNodes {
if _, ok := master.Labels["k3d.master.api.port"]; ok {
chosenMaster = master
APIPort = master.Labels["k3d.master.api.port"]
if _, ok := master.Labels["k3d.master.api.host"]; ok {
APIHost = master.Labels["k3d.master.api.host"]
for _, server := range serverNodes {
if _, ok := server.Labels[k3d.LabelServerAPIPort]; ok {
chosenServer = server
APIPort = server.Labels[k3d.LabelServerAPIPort]
if _, ok := server.Labels[k3d.LabelServerAPIHost]; ok {
APIHost = server.Labels[k3d.LabelServerAPIHost]
}
break
}
}
if chosenMaster == nil {
chosenMaster = masterNodes[0]
if chosenServer == nil {
chosenServer = serverNodes[0]
}
// get the kubeconfig from the first master node
reader, err := runtime.GetKubeconfig(chosenMaster)
// get the kubeconfig from the chosen server node
reader, err := runtime.GetKubeconfig(ctx, chosenServer)
if err != nil {
log.Errorf("Failed to get kubeconfig from node '%s'", chosenMaster.Name)
log.Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
return nil, err
}
defer reader.Close()
@ -198,8 +199,8 @@ func GetKubeconfig(runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdap
return kc, nil
}
// WriteKubeConfigToPath takes a kubeconfig and writes it to some path, which can be '-' for os.Stdout
func WriteKubeConfigToPath(kubeconfig *clientcmdapi.Config, path string) error {
// KubeconfigWriteToPath takes a kubeconfig and writes it to some path, which can be '-' for os.Stdout
func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
var output *os.File
defer output.Close()
var err error
@ -227,14 +228,14 @@ func WriteKubeConfigToPath(kubeconfig *clientcmdapi.Config, path string) error {
return err
}
log.Debugf("Wrote kubeconfig to '%s'", output.Name)
log.Debugf("Wrote kubeconfig to '%s'", output.Name())
return nil
}
// UpdateKubeConfig merges a new kubeconfig into an existing kubeconfig and returns the result
func UpdateKubeConfig(newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {
// KubeconfigMerge merges a new kubeconfig into an existing kubeconfig and returns the result
func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {
log.Debugf("Merging new KubeConfig:\n%+v\n>>> into existing KubeConfig:\n%+v", newKubeConfig, existingKubeConfig)
@ -258,8 +259,8 @@ func UpdateKubeConfig(newKubeConfig *clientcmdapi.Config, existingKubeConfig *cl
}
for k, v := range newKubeConfig.Contexts {
if _, ok := existingKubeConfig.Clusters[k]; ok && !overwriteConflicting {
return fmt.Errorf("Cluster '%s' already exists in target KubeConfig", k)
if _, ok := existingKubeConfig.Contexts[k]; ok && !overwriteConflicting {
return fmt.Errorf("Context '%s' already exists in target KubeConfig", k)
}
existingKubeConfig.Contexts[k] = v
}
@ -277,11 +278,11 @@ func UpdateKubeConfig(newKubeConfig *clientcmdapi.Config, existingKubeConfig *cl
log.Debugf("Merged KubeConfig:\n%+v", existingKubeConfig)
return WriteKubeConfig(existingKubeConfig, outPath)
return KubeconfigWrite(ctx, existingKubeConfig, outPath)
}
// WriteKubeConfig writes a kubeconfig to a path atomically
func WriteKubeConfig(kubeconfig *clientcmdapi.Config, path string) error {
// KubeconfigWrite writes a kubeconfig to a path atomically
func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
log.Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
@ -299,9 +300,9 @@ func WriteKubeConfig(kubeconfig *clientcmdapi.Config, path string) error {
return nil
}
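The atomicity comes from the write-temp-then-rename idiom: the kubeconfig is serialized to a uniquely named sibling file, which is then renamed over the target. The rename half of KubeconfigWrite falls outside the hunk above, so its shape here is an assumption:

    // Sketch of the pattern; kubeconfig and path as in KubeconfigWrite above.
    tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
    if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
        return err
    }
    return os.Rename(tempPath, path) // rename replaces the target atomically on POSIX filesystems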
// GetDefaultKubeConfig loads the default KubeConfig file
func GetDefaultKubeConfig() (*clientcmdapi.Config, error) {
path, err := GetDefaultKubeConfigPath()
// KubeconfigGetDefaultFile loads the default KubeConfig file
func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
path, err := KubeconfigGetDefaultPath()
if err != nil {
return nil, err
}
@ -309,8 +310,8 @@ func GetDefaultKubeConfig() (*clientcmdapi.Config, error) {
return clientcmd.LoadFromFile(path)
}
// GetDefaultKubeConfigPath returns the path of the default kubeconfig, but errors if the KUBECONFIG env var specifies more than one file
func GetDefaultKubeConfigPath() (string, error) {
// KubeconfigGetDefaultPath returns the path of the default kubeconfig, but errors if the KUBECONFIG env var specifies more than one file
func KubeconfigGetDefaultPath() (string, error) {
defaultKubeConfigLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
if len(defaultKubeConfigLoadingRules.GetLoadingPrecedence()) > 1 {
return "", fmt.Errorf("Multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
@ -318,22 +319,22 @@ func GetDefaultKubeConfigPath() (string, error) {
return defaultKubeConfigLoadingRules.GetDefaultFilename(), nil
}
// RemoveClusterFromDefaultKubeConfig removes a cluster's details from the default kubeconfig
func RemoveClusterFromDefaultKubeConfig(cluster *k3d.Cluster) error {
defaultKubeConfigPath, err := GetDefaultKubeConfigPath()
// KubeconfigRemoveClusterFromDefaultConfig removes a cluster's details from the default kubeconfig
func KubeconfigRemoveClusterFromDefaultConfig(ctx context.Context, cluster *k3d.Cluster) error {
defaultKubeConfigPath, err := KubeconfigGetDefaultPath()
if err != nil {
return err
}
kubeconfig, err := GetDefaultKubeConfig()
kubeconfig, err := KubeconfigGetDefaultFile()
if err != nil {
return err
}
kubeconfig = RemoveClusterFromKubeConfig(cluster, kubeconfig)
return WriteKubeConfig(kubeconfig, defaultKubeConfigPath)
kubeconfig = KubeconfigRemoveCluster(ctx, cluster, kubeconfig)
return KubeconfigWrite(ctx, kubeconfig, defaultKubeConfigPath)
}
// RemoveClusterFromKubeConfig removes a cluster's details from a given kubeconfig
func RemoveClusterFromKubeConfig(cluster *k3d.Cluster, kubeconfig *clientcmdapi.Config) *clientcmdapi.Config {
// KubeconfigRemoveCluster removes a cluster's details from a given kubeconfig
func KubeconfigRemoveCluster(ctx context.Context, cluster *k3d.Cluster, kubeconfig *clientcmdapi.Config) *clientcmdapi.Config {
clusterName := fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
contextName := fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
authInfoName := fmt.Sprintf("admin@%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
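Taken together, the renamed helpers compose into the common call: fetch the kubeconfig from a server node and merge it into the default file. A usage sketch, with the runtime and cluster handles assumed and an empty output string selecting the default path:

    path, err := KubeconfigGetWrite(ctx, runtime, cluster, "", &WriteKubeConfigOptions{
        UpdateExisting:       true,
        UpdateCurrentContext: true,
        OverwriteExisting:    false,
    })
    if err != nil {
        return err
    }
    log.Infof("Kubeconfig written to '%s'", path)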

@ -22,38 +22,51 @@ THE SOFTWARE.
package cluster
import (
"context"
"fmt"
"strings"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// AddMasterToLoadBalancer adds a new master node to the loadbalancer configuration
func AddMasterToLoadBalancer(runtime runtimes.Runtime, cluster *k3d.Cluster, newNode *k3d.Node) error {
// UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster
func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) error {
var err error
// update cluster details to ensure that we have the latest node list
cluster, err = ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to update details for cluster '%s'", cluster.Name)
return err
}
// find the LoadBalancer for the target cluster
masterNodes := ""
serverNodesList := []string{}
var loadbalancer *k3d.Node
for _, node := range cluster.Nodes {
if node.Role == k3d.LoadBalancerRole { // get the loadbalancer we want to update
loadbalancer = node
} else if node.Role == k3d.MasterRole { // create a list of master nodes
masterNodes += node.Name + ","
} else if node.Role == k3d.ServerRole { // create a list of server nodes
serverNodesList = append(serverNodesList, node.Name)
}
}
serverNodes := strings.Join(serverNodesList, ",")
if loadbalancer == nil {
return fmt.Errorf("Failed to find loadbalancer for cluster '%s'", cluster.Name)
}
masterNodes += newNode.Name // append the new master node to the end of the list
log.Debugf("SERVERS=%s", masterNodes)
log.Debugf("Servers as passed to serverlb: '%s'", serverNodes)
command := fmt.Sprintf("SERVERS=%s %s", masterNodes, "confd -onetime -backend env && nginx -s reload")
if err := runtime.ExecInNode(loadbalancer, []string{"sh", "-c", command}); err != nil {
log.Errorln("Failed to update loadbalancer configuration")
command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload")
if err := runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command}); err != nil {
if strings.Contains(err.Error(), "host not found in upstream") {
log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error())
return nil
}
return err
}
return nil
}
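The serverlb update boils down to a single exec in the loadbalancer container: the comma-separated server node names go in via the SERVERS environment variable, confd re-renders the nginx upstream config from the environment, and nginx reloads. A minimal sketch of the command assembly, assuming two hypothetical node names (the real list comes from ClusterGet above):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// node names are illustrative
	serverNodes := strings.Join([]string{"k3d-demo-server-0", "k3d-demo-server-1"}, ",")
	command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload")
	// executed inside the container as: runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command})
	fmt.Println(command)
}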

@ -30,26 +30,30 @@ import (
"time"
"github.com/imdario/mergo"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// AddNodeToCluster adds a node to an existing cluster
func AddNodeToCluster(runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster) error {
cluster, err := GetCluster(cluster, runtime)
// NodeAddToCluster adds a node to an existing cluster
func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
targetClusterName := cluster.Name
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find specified cluster '%s'", cluster.Name)
log.Errorf("Failed to find specified cluster '%s'", targetClusterName)
return err
}
log.Debugf("Adding node to cluster %+v", cluster)
// network
node.Network = cluster.Network.Name
// skeleton
node.Labels = map[string]string{}
if node.Labels == nil {
node.Labels = map[string]string{
k3d.LabelRole: string(node.Role),
}
}
node.Env = []string{}
// copy labels and env vars from a similar node in the selected cluster
@ -62,7 +66,8 @@ func AddNodeToCluster(runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Clu
}
// if we didn't find a node with the same role in the cluster, just choose any other node
if chosenNode == nil {
log.Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node...", node.Role, cluster.Name)
log.Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
node.Cmd = k3d.DefaultRoleCmds[node.Role]
for _, existingNode := range cluster.Nodes {
if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role
chosenNode = existingNode
@ -72,12 +77,12 @@ func AddNodeToCluster(runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Clu
}
// get node details
chosenNode, err = GetNode(chosenNode, runtime)
chosenNode, err = NodeGet(ctx, runtime, chosenNode)
if err != nil {
return err
}
log.Debugf("Copying configuration from existing node %+v", chosenNode)
log.Debugf("Adding node %+v \n>>> to cluster %+v\n>>> based on existing node %+v", node, cluster, chosenNode)
// merge node config of new node into existing node config
if err := mergo.MergeWithOverwrite(chosenNode, *node); err != nil {
@ -97,21 +102,40 @@ func AddNodeToCluster(runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Clu
}
}
if !k3sURLFound {
if url, ok := node.Labels["k3d.cluster.url"]; ok {
if url, ok := node.Labels[k3d.LabelClusterURL]; ok {
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url))
} else {
log.Warnln("Failed to find K3S_URL value!")
}
}
if err := CreateNode(node, runtime); err != nil {
if node.Role == k3d.ServerRole {
for _, forbiddenCmd := range k3d.DoNotCopyServerFlags {
for i, cmd := range node.Cmd {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if cmd == forbiddenCmd {
log.Debugf("Dropping '%s' from node's cmd", forbiddenCmd)
node.Cmd = append(node.Cmd[:i], node.Cmd[i+1:]...)
}
}
for i, arg := range node.Args {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if arg == forbiddenCmd {
log.Debugf("Dropping '%s' from node's args", forbiddenCmd)
node.Args = append(node.Args[:i], node.Args[i+1:]...)
}
}
}
}
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
return err
}
// if it's a master node, then update the loadbalancer configuration to include it
if node.Role == k3d.MasterRole {
if err := AddMasterToLoadBalancer(runtime, cluster, node); err != nil {
log.Errorln("Failed to add new master node to cluster loadbalancer")
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
log.Errorln("Failed to update cluster loadbalancer")
return err
}
}
@ -119,17 +143,70 @@ func AddNodeToCluster(runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Clu
return nil
}
// CreateNodes creates a list of nodes
func CreateNodes(nodes []*k3d.Node, runtime runtimes.Runtime) { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
// NodeAddToClusterMulti adds multiple nodes to a chosen cluster
func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
if createNodeOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
defer cancel()
}
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := CreateNode(node, runtime); err != nil {
if err := NodeAddToCluster(ctx, runtime, node, cluster, k3d.NodeCreateOpts{}); err != nil {
return err
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
return NodeWaitForLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
})
}
}
if err := nodeWaitGroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorf(">>> %+v", err)
return fmt.Errorf("Failed to add nodes")
}
return nil
}
// NodeCreateMulti creates a list of nodes
func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, createNodeOpts k3d.NodeCreateOpts) error { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
if createNodeOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
defer cancel()
}
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Error(err)
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
return NodeWaitForLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
})
}
}
if err := nodeWaitGroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorf(">>> %+v", err)
return fmt.Errorf("Failed to create nodes")
}
return nil
}
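NodeAddToClusterMulti and NodeCreateMulti share the same concurrency shape: an optional context.WithTimeout bounds the whole operation, and an errgroup runs one log-watch goroutine per node so the first failure (or the timeout) cancels the rest. A stripped-down sketch of that pattern, with a sleep standing in for NodeWaitForLogMessage:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	g, ctx := errgroup.WithContext(ctx)
	for _, name := range []string{"node-0", "node-1"} {
		currentNode := name // capture the loop variable, as above
		g.Go(func() error {
			select {
			case <-ctx.Done(): // timeout hit or a sibling failed
				return ctx.Err()
			case <-time.After(time.Second): // stand-in for the log watch
				fmt.Println(currentNode, "ready")
				return nil
			}
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("failed to bring up all nodes in time:", err)
	}
}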
// CreateNode creates a new containerized k3s node
func CreateNode(node *k3d.Node, runtime runtimes.Runtime) error {
// NodeCreate creates a new containerized k3s node
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
log.Debugf("Creating node from spec\n%+v", node)
/*
@ -148,52 +225,66 @@ func CreateNode(node *k3d.Node, runtime runtimes.Runtime) error {
}
node.Labels = labels
// second most important: the node role label
node.Labels["k3d.role"] = string(node.Role)
node.Labels[k3d.LabelRole] = string(node.Role)
// ### Environment ###
node.Env = append(node.Env, k3d.DefaultNodeEnv...) // append default node env vars
// specify options depending on node role
if node.Role == k3d.WorkerRole { // TODO: check here AND in CLI or only here?
if err := patchWorkerSpec(node); err != nil {
if node.Role == k3d.AgentRole { // TODO: check here AND in CLI or only here?
if err := patchAgentSpec(node); err != nil {
return err
}
} else if node.Role == k3d.MasterRole {
if err := patchMasterSpec(node); err != nil {
} else if node.Role == k3d.ServerRole {
if err := patchServerSpec(node); err != nil {
return err
}
log.Debugf("spec = %+v\n", node)
}
/*
* CREATION
*/
if err := runtime.CreateNode(node); err != nil {
if err := runtime.CreateNode(ctx, node); err != nil {
return err
}
return nil
}
// DeleteNode deletes an existing node
func DeleteNode(runtime runtimes.Runtime, node *k3d.Node) error {
// NodeDelete deletes an existing node
func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) error {
if err := runtime.DeleteNode(node); err != nil {
if err := runtime.DeleteNode(ctx, node); err != nil {
log.Error(err)
}
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
if err != nil {
log.Errorf("Failed to update loadbalancer: Failed to find cluster for node '%s'", node.Name)
return err
}
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
log.Errorln("Failed to update cluster loadbalancer")
return err
}
}
return nil
}
// patchWorkerSpec adds worker node specific settings to a node
func patchWorkerSpec(node *k3d.Node) error {
// patchAgentSpec adds agent node specific settings to a node
func patchAgentSpec(node *k3d.Node) error {
if node.Cmd == nil {
node.Cmd = []string{"agent"}
}
return nil
}
// patchMasterSpec adds worker node specific settings to a node
func patchMasterSpec(node *k3d.Node) error {
// patchServerSpec adds server node specific settings to a node
func patchServerSpec(node *k3d.Node) error {
// command / arguments
if node.Cmd == nil {
@ -201,19 +292,19 @@ func patchMasterSpec(node *k3d.Node) error {
}
// Add labels and TLS SAN for the exposed API
// FIXME: For now, the labels concerning the API on the master nodes are only being used for configuring the kubeconfig
node.Labels["k3d.master.api.hostIP"] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
node.Labels["k3d.master.api.host"] = node.MasterOpts.ExposeAPI.Host
node.Labels["k3d.master.api.port"] = node.MasterOpts.ExposeAPI.Port
// FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig
node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.ExposeAPI.Host
node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.ExposeAPI.Port
node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name
node.Args = append(node.Args, "--tls-san", node.ServerOpts.ExposeAPI.Host) // add TLS SAN for non default host name
return nil
}
// GetNodes returns a list of all existing clusters
func GetNodes(runtime runtimes.Runtime) ([]*k3d.Node, error) {
nodes, err := runtime.GetNodesByLabel(k3d.DefaultObjectLabels)
// NodeList returns a list of all existing nodes
func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
if err != nil {
log.Errorln("Failed to get nodes")
return nil, err
@ -222,10 +313,10 @@ func GetNodes(runtime runtimes.Runtime) ([]*k3d.Node, error) {
return nodes, nil
}
// GetNode returns a node matching the specified node fields
func GetNode(node *k3d.Node, runtime runtimes.Runtime) (*k3d.Node, error) {
// NodeGet returns a node matching the specified node fields
func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
// get node
node, err := runtime.GetNode(node)
node, err := runtime.GetNode(ctx, node)
if err != nil {
log.Errorf("Failed to get node '%s'", node.Name)
}
@ -233,8 +324,8 @@ func GetNode(node *k3d.Node, runtime runtimes.Runtime) (*k3d.Node, error) {
return node, nil
}
// WaitForNodeLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string) error {
// NodeWaitForLogMessage follows the logs of a node container and returns once it finds a specific line in them (or the timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
for {
select {
case <-ctx.Done():
@ -243,7 +334,7 @@ func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
}
// read the logs
out, err := runtime.GetNodeLogs(node)
out, err := runtime.GetNodeLogs(ctx, node, since)
if err != nil {
if out != nil {
out.Close()

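NodeWaitForLogMessage polls the node's log stream until the role-specific ready message (see ReadyLogMessageByRole in the types further down) shows up, and gives up when the context is done; the new since parameter lets callers skip log lines from before a restart. A simplified sketch of the loop shape, with readLogs as a hypothetical stand-in for runtime.GetNodeLogs plus buffering:

package sketch

import (
	"context"
	"fmt"
	"strings"
	"time"
)

func waitForMessage(ctx context.Context, message string, readLogs func() (string, error)) error {
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("context done while waiting for '%s'", message)
		default:
		}
		logs, err := readLogs()
		if err != nil {
			return err
		}
		if strings.Contains(logs, message) {
			return nil // found the ready message
		}
		time.Sleep(500 * time.Millisecond) // avoid a hot loop
	}
}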
@ -19,34 +19,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package stop
package containerd
import (
log "github.com/sirupsen/logrus"
import "context"
"github.com/spf13/cobra"
)
// NewCmdStop returns a new cobra command
func NewCmdStop() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "stop",
Short: "Stop a resource.",
Long: `Stop a resource.`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
// add subcommands
cmd.AddCommand(NewCmdStopCluster())
cmd.AddCommand(NewCmdStopNode())
// done
return cmd
// GetImages returns a list of images present in the runtime
func (d Containerd) GetImages(ctx context.Context) ([]string, error) {
return nil, nil
}

@ -23,12 +23,13 @@ THE SOFTWARE.
package containerd
import (
"context"
"io"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
)
// GetKubeconfig grabs the kubeconfig from inside a k3d node
func (d Containerd) GetKubeconfig(node *k3d.Node) (io.ReadCloser, error) {
func (d Containerd) GetKubeconfig(ctx context.Context, node *k3d.Node) (io.ReadCloser, error) {
return nil, nil
}

@ -21,12 +21,14 @@ THE SOFTWARE.
*/
package containerd
import "context"
// CreateNetworkIfNotPresent creates a new docker network
func (d Containerd) CreateNetworkIfNotPresent(name string) (string, bool, error) {
func (d Containerd) CreateNetworkIfNotPresent(ctx context.Context, name string) (string, bool, error) {
return "", false, nil
}
// DeleteNetwork deletes a network
func (d Containerd) DeleteNetwork(ID string) error {
func (d Containerd) DeleteNetwork(ctx context.Context, ID string) error {
return nil
}

@ -25,19 +25,17 @@ package containerd
import (
"context"
"io"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// CreateNode creates a new k3d node
func (d Containerd) CreateNode(node *k3d.Node) error {
log.Debugln("containerd.CreateNode...")
func (d Containerd) CreateNode(ctx context.Context, node *k3d.Node) error {
// create containerd client
ctx := context.Background()
clientOpts := []containerd.ClientOpt{
containerd.WithDefaultNamespace("k3d"),
}
@ -77,9 +75,7 @@ func (d Containerd) CreateNode(node *k3d.Node) error {
}
// DeleteNode deletes an existing k3d node
func (d Containerd) DeleteNode(node *k3d.Node) error {
log.Debugln("containerd.DeleteNode...")
ctx := context.Background()
func (d Containerd) DeleteNode(ctx context.Context, node *k3d.Node) error {
clientOpts := []containerd.ClientOpt{
containerd.WithDefaultNamespace("k3d"),
}
@ -95,7 +91,7 @@ func (d Containerd) DeleteNode(node *k3d.Node) error {
return err
}
if err = container.Delete(ctx, []containerd.DeleteOpts{}...); err != nil {
log.Errorln("Failed to delete container", container.ID)
log.Errorf("Failed to delete container '%s'", container.ID)
return err
}
@ -103,30 +99,30 @@ func (d Containerd) DeleteNode(node *k3d.Node) error {
}
// StartNode starts an existing node
func (d Containerd) StartNode(node *k3d.Node) error {
func (d Containerd) StartNode(ctx context.Context, node *k3d.Node) error {
return nil // TODO: fill
}
// StopNode stops an existing node
func (d Containerd) StopNode(node *k3d.Node) error {
func (d Containerd) StopNode(ctx context.Context, node *k3d.Node) error {
return nil // TODO: fill
}
func (d Containerd) GetNodesByLabel(labels map[string]string) ([]*k3d.Node, error) {
func (d Containerd) GetNodesByLabel(ctx context.Context, labels map[string]string) ([]*k3d.Node, error) {
return nil, nil
}
// GetNode tries to get a node container by its name
func (d Containerd) GetNode(node *k3d.Node) (*k3d.Node, error) {
func (d Containerd) GetNode(ctx context.Context, node *k3d.Node) (*k3d.Node, error) {
return nil, nil
}
// GetNodeLogs returns the logs from a given node
func (d Containerd) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) {
func (d Containerd) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time) (io.ReadCloser, error) {
return nil, nil
}
// ExecInNode execs a command inside a node
func (d Containerd) ExecInNode(node *k3d.Node, cmd []string) error {
func (d Containerd) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) error {
return nil
}

@ -20,3 +20,14 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package containerd
import (
"context"
k3d "github.com/rancher/k3d/v3/pkg/types"
)
// CopyToNode copies a file from the local FS to the selected node
func (d Containerd) CopyToNode(ctx context.Context, src string, dest string, node *k3d.Node) error {
return nil
}

@ -21,12 +21,19 @@ THE SOFTWARE.
*/
package containerd
import "context"
// CreateVolume creates a new named volume
func (d Containerd) CreateVolume(name string, labels map[string]string) error {
func (d Containerd) CreateVolume(ctx context.Context, name string, labels map[string]string) error {
return nil
}
// DeleteVolume deletes a named volume
func (d Containerd) DeleteVolume(name string) error {
func (d Containerd) DeleteVolume(ctx context.Context, name string) error {
return nil
}
// GetVolume tries to get a named volume
func (d Containerd) GetVolume(name string) (string, error) {
return "", nil
}

@ -33,17 +33,16 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// createContainer creates a new docker container from translated specs
func createContainer(dockerNode *NodeInDocker, name string) error {
func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string) error {
log.Debugf("Creating docker container with translated config\n%+v\n", dockerNode) // TODO: remove?
// initialize docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -54,16 +53,16 @@ func createContainer(dockerNode *NodeInDocker, name string) error {
// create container
var resp container.ContainerCreateCreatedBody
for {
resp, err = docker.ContainerCreate(ctx, &dockerNode.ContainerConfig, &dockerNode.HostConfig, &dockerNode.NetworkingConfig, name)
resp, err = docker.ContainerCreate(ctx, &dockerNode.ContainerConfig, &dockerNode.HostConfig, &dockerNode.NetworkingConfig, nil, name)
if err != nil {
if client.IsErrNotFound(err) {
if err := pullImage(&ctx, docker, dockerNode.ContainerConfig.Image); err != nil {
log.Errorln("Failed to create container")
if err := pullImage(ctx, docker, dockerNode.ContainerConfig.Image); err != nil {
log.Errorf("Failed to create container '%s'", name)
return err
}
continue
}
log.Errorln("Failed to create container")
log.Errorf("Failed to create container '%s'", name)
return err
}
log.Debugln("Created container", resp.ID)
@ -80,10 +79,9 @@ func createContainer(dockerNode *NodeInDocker, name string) error {
}
// removeContainer deletes a running container (like docker rm -f)
func removeContainer(ID string) error {
func removeContainer(ctx context.Context, ID string) error {
// (0) create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -109,9 +107,9 @@ func removeContainer(ID string) error {
}
// pullImage pulls a container image and outputs progress if --verbose flag is set
func pullImage(ctx *context.Context, docker *client.Client, image string) error {
func pullImage(ctx context.Context, docker *client.Client, image string) error {
resp, err := docker.ImagePull(*ctx, image, types.ImagePullOptions{})
resp, err := docker.ImagePull(ctx, image, types.ImagePullOptions{})
if err != nil {
log.Errorf("Failed to pull image '%s'", image)
return err
@ -135,9 +133,8 @@ func pullImage(ctx *context.Context, docker *client.Client, image string) error
}
func getNodeContainer(node *k3d.Node) (*types.Container, error) {
func getNodeContainer(ctx context.Context, node *k3d.Node) (*types.Container, error) {
// (0) create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")

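createContainer's retry loop is the interesting part here: ContainerCreate fails with a not-found error when the image is missing locally, pullImage fetches it, and the loop retries the creation. A reduced sketch of that shape, where create, notFound and pull are hypothetical stand-ins for docker.ContainerCreate, client.IsErrNotFound and pullImage:

package sketch

func createWithPull(create func() error, notFound func(error) bool, pull func() error) error {
	for {
		err := create()
		if err == nil {
			return nil
		}
		if !notFound(err) {
			return err // a real failure, not a missing image
		}
		if err := pull(); err != nil {
			return err
		}
		// image is present now: loop around and retry the creation
	}
}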
@ -0,0 +1,54 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package docker
import (
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
log "github.com/sirupsen/logrus"
)
// GetImages returns a list of images present in the runtime
func (d Docker) GetImages(ctx context.Context) ([]string, error) {
// create docker client
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()
imageSummary, err := docker.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
log.Errorln("Failed to list available docker images")
return nil, err
}
var images []string
for _, image := range imageSummary {
images = append(images, image.RepoTags...)
}
return images, nil
}

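GetImages flattens every RepoTag of every local image into one string slice; ImageImportIntoClusterMulti further down matches requested image names against it. A hedged usage sketch, assuming a Docker daemon reachable via the usual environment variables:

package main

import (
	"context"
	"fmt"

	"github.com/rancher/k3d/v3/pkg/runtimes"
)

func main() {
	images, err := runtimes.Docker.GetImages(context.Background())
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img) // e.g. "rancher/k3s:v1.18.6-k3s1" (illustrative tag)
	}
}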
@ -27,13 +27,12 @@ import (
"io"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// GetKubeconfig grabs the kubeconfig from inside a k3d node
func (d Docker) GetKubeconfig(node *k3d.Node) (io.ReadCloser, error) {
ctx := context.Background()
func (d Docker) GetKubeconfig(ctx context.Context, node *k3d.Node) (io.ReadCloser, error) {
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -41,12 +40,12 @@ func (d Docker) GetKubeconfig(node *k3d.Node) (io.ReadCloser, error) {
}
defer docker.Close()
container, err := getNodeContainer(node)
container, err := getNodeContainer(ctx, node)
if err != nil {
return nil, err
}
log.Debugf("Container: %+v", container)
log.Debugf("Container Details: %+v", container)
reader, _, err := docker.CopyFromContainer(ctx, container.ID, "/output/kubeconfig.yaml")
if err != nil {

@ -28,16 +28,15 @@ import (
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// CreateNetworkIfNotPresent creates a new docker network
// @return: network name, exists, error
func (d Docker) CreateNetworkIfNotPresent(name string) (string, bool, error) {
func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, name string) (string, bool, error) {
// (0) create new docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -82,9 +81,8 @@ func (d Docker) CreateNetworkIfNotPresent(name string) (string, bool, error) {
}
// DeleteNetwork deletes a network
func (d Docker) DeleteNetwork(ID string) error {
func (d Docker) DeleteNetwork(ctx context.Context, ID string) error {
// (0) create new docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -97,8 +95,7 @@ func (d Docker) DeleteNetwork(ID string) error {
}
// GetNetwork gets information about a network by its ID
func GetNetwork(ID string) (types.NetworkResource, error) {
ctx := context.Background()
func GetNetwork(ctx context.Context, ID string) (types.NetworkResource, error) {
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")

@ -32,12 +32,12 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// CreateNode creates a new container
func (d Docker) CreateNode(node *k3d.Node) error {
func (d Docker) CreateNode(ctx context.Context, node *k3d.Node) error {
// translate node spec to docker container specs
dockerNode, err := TranslateNodeToContainer(node)
@ -47,8 +47,8 @@ func (d Docker) CreateNode(node *k3d.Node) error {
}
// create node
if err := createContainer(dockerNode, node.Name); err != nil {
log.Errorln("Failed to create k3d node")
if err := createContainer(ctx, dockerNode, node.Name); err != nil {
log.Errorf("Failed to create node '%s'", node.Name)
return err
}
@ -56,16 +56,15 @@ func (d Docker) CreateNode(node *k3d.Node) error {
}
// DeleteNode deletes a node
func (d Docker) DeleteNode(nodeSpec *k3d.Node) error {
log.Debugln("docker.DeleteNode...")
return removeContainer(nodeSpec.Name)
func (d Docker) DeleteNode(ctx context.Context, nodeSpec *k3d.Node) error {
return removeContainer(ctx, nodeSpec.Name)
}
// GetNodesByLabel returns a list of existing nodes
func (d Docker) GetNodesByLabel(labels map[string]string) ([]*k3d.Node, error) {
func (d Docker) GetNodesByLabel(ctx context.Context, labels map[string]string) ([]*k3d.Node, error) {
// (0) get containers
containers, err := getContainersByLabel(labels)
containers, err := getContainersByLabel(ctx, labels)
if err != nil {
return nil, err
}
@ -85,9 +84,8 @@ func (d Docker) GetNodesByLabel(labels map[string]string) ([]*k3d.Node, error) {
}
// StartNode starts an existing node
func (d Docker) StartNode(node *k3d.Node) error {
func (d Docker) StartNode(ctx context.Context, node *k3d.Node) error {
// (0) create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return fmt.Errorf("Failed to create docker client. %+v", err)
@ -95,7 +93,7 @@ func (d Docker) StartNode(node *k3d.Node) error {
defer docker.Close()
// get container which represents the node
nodeContainer, err := getNodeContainer(node)
nodeContainer, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to get container for node '%s'", node.Name)
return err
@ -116,9 +114,8 @@ func (d Docker) StartNode(node *k3d.Node) error {
}
// StopNode stops an existing node
func (d Docker) StopNode(node *k3d.Node) error {
func (d Docker) StopNode(ctx context.Context, node *k3d.Node) error {
// (0) create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return fmt.Errorf("Failed to create docker client. %+v", err)
@ -126,7 +123,7 @@ func (d Docker) StopNode(node *k3d.Node) error {
defer docker.Close()
// get container which represents the node
nodeContainer, err := getNodeContainer(node)
nodeContainer, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to get container for node '%s'", node.Name)
return err
@ -145,9 +142,8 @@ func (d Docker) StopNode(node *k3d.Node) error {
return nil
}
func getContainersByLabel(labels map[string]string) ([]types.Container, error) {
func getContainersByLabel(ctx context.Context, labels map[string]string) ([]types.Container, error) {
// (0) create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, fmt.Errorf("Failed to create docker client. %+v", err)
@ -176,9 +172,8 @@ func getContainersByLabel(labels map[string]string) ([]types.Container, error) {
}
// getContainerDetails returns the ContainerJSON with more details
func getContainerDetails(containerID string) (types.ContainerJSON, error) {
func getContainerDetails(ctx context.Context, containerID string) (types.ContainerJSON, error) {
// (0) create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return types.ContainerJSON{}, fmt.Errorf("Failed to create docker client. %+v", err)
@ -187,7 +182,7 @@ func getContainerDetails(containerID string) (types.ContainerJSON, error) {
containerDetails, err := docker.ContainerInspect(ctx, containerID)
if err != nil {
log.Errorln("Failed to get details for container '%s'", containerID)
log.Errorf("Failed to get details for container '%s'", containerID)
return types.ContainerJSON{}, err
}
@ -196,22 +191,21 @@ func getContainerDetails(containerID string) (types.ContainerJSON, error) {
}
// GetNode tries to get a node container by its name
func (d Docker) GetNode(node *k3d.Node) (*k3d.Node, error) {
container, err := getNodeContainer(node)
func (d Docker) GetNode(ctx context.Context, node *k3d.Node) (*k3d.Node, error) {
container, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to get container for node '%s'", node.Name)
return nil, err
return node, err
}
containerDetails, err := getContainerDetails(container.ID)
containerDetails, err := getContainerDetails(ctx, container.ID)
if err != nil {
return nil, err
return node, err
}
node, err = TranslateContainerDetailsToNode(containerDetails)
if err != nil {
log.Errorf("Failed to translate container details for node '%s' to node object", node.Name)
return nil, err
return node, err
}
return node, nil
@ -219,15 +213,14 @@ func (d Docker) GetNode(node *k3d.Node) (*k3d.Node, error) {
}
// GetNodeLogs returns the logs from a given node
func (d Docker) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) {
func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time) (io.ReadCloser, error) {
// get the container for the given node
container, err := getNodeContainer(node)
container, err := getNodeContainer(ctx, node)
if err != nil {
return nil, err
}
// create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -245,7 +238,11 @@ func (d Docker) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) {
return nil, fmt.Errorf("Node '%s' (container '%s') not running", node.Name, containerInspectResponse.ID)
}
logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
sinceStr := ""
if !since.IsZero() {
sinceStr = since.Format("2006-01-02T15:04:05")
}
logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Since: sinceStr})
if err != nil {
log.Errorf("Failed to get logs from node '%s' (container '%s')", node.Name, container.ID)
return nil, err
@ -255,18 +252,17 @@ func (d Docker) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) {
}
// ExecInNode execs a command inside a node
func (d Docker) ExecInNode(node *k3d.Node, cmd []string) error {
func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) error {
log.Debugf("Exec cmds '%+v' in node '%s'", cmd, node.Name)
log.Debugf("Executing command '%+v' in node '%s'", cmd, node.Name)
// get the container for the given node
container, err := getNodeContainer(node)
container, err := getNodeContainer(ctx, node)
if err != nil {
return err
}
// create docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -305,7 +301,7 @@ func (d Docker) ExecInNode(node *k3d.Node, cmd []string) error {
// get info about exec process inside container
execInfo, err := docker.ContainerExecInspect(ctx, exec.ID)
if err != nil {
log.Errorln("Failed to inspect exec process in node '%s'", node.Name)
log.Errorf("Failed to inspect exec process in node '%s'", node.Name)
return err
}
@ -326,7 +322,7 @@ func (d Docker) ExecInNode(node *k3d.Node, cmd []string) error {
logs, err := ioutil.ReadAll(execConnection.Reader)
if err != nil {
log.Errorln("Failed to get logs from node '%s'", node.Name)
log.Errorf("Failed to get logs from node '%s'", node.Name)
return err
}

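One detail in GetNodeLogs worth calling out: Docker's ContainerLogs takes the Since value as a string, so the zero time.Time means "all logs" and anything else is rendered with Go's reference-time layout. A tiny sketch of that conversion:

package main

import (
	"fmt"
	"time"
)

func main() {
	since := time.Now().Add(-5 * time.Minute) // e.g. logs since the last restart
	sinceStr := ""
	if !since.IsZero() {
		sinceStr = since.Format("2006-01-02T15:04:05") // Go reference layout, as above
	}
	fmt.Printf("Since: %q\n", sinceStr)
}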
@ -23,6 +23,7 @@ THE SOFTWARE.
package docker
import (
"context"
"fmt"
"strings"
@ -30,7 +31,7 @@ import (
docker "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -39,7 +40,9 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
/* initialize everything that we need */
containerConfig := docker.Config{}
hostConfig := docker.HostConfig{}
hostConfig := docker.HostConfig{
Init: &[]bool{true}[0],
}
networkingConfig := network.NetworkingConfig{}
/* Name & Image */
@ -76,14 +79,14 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
hostConfig.Privileged = true
/* Volumes */
// TODO: image volume
log.Debugf("Volumes: %+v", node.Volumes)
hostConfig.Binds = node.Volumes
// containerConfig.Volumes = map[string]struct{}{} // TODO: do we need this? We only used binds before
/* Ports */
exposedPorts, portBindings, err := nat.ParsePortSpecs(node.Ports)
if err != nil {
log.Errorln("Failed to parse port specs '%v'", node.Ports)
log.Errorf("Failed to parse port specs '%v'", node.Ports)
return nil, err
}
containerConfig.ExposedPorts = exposedPorts
@ -92,7 +95,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
node.Network: {},
}
netInfo, err := GetNetwork(node.Network)
netInfo, err := GetNetwork(context.Background(), node.Network)
if err != nil {
log.Warnln("Failed to get network information")
log.Warnln(err)
@ -113,7 +116,7 @@ func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) {
Name: strings.TrimPrefix(cont.Names[0], "/"), // container name with leading '/' cut off
Image: cont.Image,
Labels: cont.Labels,
Role: k3d.NodeRoles[cont.Labels["k3d.role"]],
Role: k3d.NodeRoles[cont.Labels[k3d.LabelRole]],
// TODO: all the rest
}
return node, nil
@ -139,25 +142,20 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
// get the clusterNetwork
clusterNetwork := ""
for networkName := range containerDetails.NetworkSettings.Networks {
if strings.HasPrefix(networkName, fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, containerDetails.Config.Labels["k3d.cluster"])) { // FIXME: catch error if label 'k3d.cluster' does not exist, but this should also never be the case
if strings.HasPrefix(networkName, fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, containerDetails.Config.Labels[k3d.LabelClusterName])) { // FIXME: catch error if label 'k3d.cluster' does not exist, but this should also never be the case
clusterNetwork = networkName
}
}
// masterOpts
masterOpts := k3d.MasterOpts{IsInit: false}
// serverOpts
serverOpts := k3d.ServerOpts{IsInit: false}
for k, v := range containerDetails.Config.Labels {
/*
node.Labels["k3d.master.api.hostIP"] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
node.Labels["k3d.master.api.host"] = node.MasterOpts.ExposeAPI.Host
node.Labels["k3d.master.api.port"] = node.MasterOpts.ExposeAPI.Port
*/
if k == "k3d.master.api.hostIP" {
masterOpts.ExposeAPI.HostIP = v
} else if k == "k3d.master.api.host" {
masterOpts.ExposeAPI.Host = v
} else if k == "k3d.master.api.port" {
masterOpts.ExposeAPI.Port = v
if k == k3d.LabelServerAPIHostIP {
serverOpts.ExposeAPI.HostIP = v
} else if k == k3d.LabelServerAPIHost {
serverOpts.ExposeAPI.Host = v
} else if k == k3d.LabelServerAPIPort {
serverOpts.ExposeAPI.Port = v
}
}
@ -179,7 +177,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
node := &k3d.Node{
Name: strings.TrimPrefix(containerDetails.Name, "/"), // container name with leading '/' cut off
Role: k3d.NodeRoles[containerDetails.Config.Labels["k3d.role"]],
Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]],
Image: containerDetails.Image,
Volumes: containerDetails.HostConfig.Binds,
Env: env,
@ -189,8 +187,8 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
Restart: restart,
Labels: labels,
Network: clusterNetwork,
MasterOpts: masterOpts,
WorkerOpts: k3d.WorkerOpts{},
ServerOpts: serverOpts,
AgentOpts: k3d.AgentOpts{},
}
return node, nil
}

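The translation layer closes a round trip: patchServerSpec writes the exposed-API settings into container labels at create time, and TranslateContainerDetailsToNode reads them back into ServerOpts. A sketch of the read side, assuming the ExposeAPI struct carries the Host, HostIP and Port fields used above:

package sketch

import k3d "github.com/rancher/k3d/v3/pkg/types"

// exposeAPIFromLabels is a hypothetical helper condensing the label loop above.
func exposeAPIFromLabels(labels map[string]string) k3d.ExposeAPI {
	return k3d.ExposeAPI{
		HostIP: labels[k3d.LabelServerAPIHostIP],
		Host:   labels[k3d.LabelServerAPIHost],
		Port:   labels[k3d.LabelServerAPIPort],
	}
}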
@ -30,14 +30,14 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
)
func TestTranslateNodeToContainer(t *testing.T) {
inputNode := &k3d.Node{
Name: "test",
Role: k3d.MasterRole,
Role: k3d.ServerRole,
Image: "rancher/k3s:v0.9.0",
Volumes: []string{"/test:/tmp/test"},
Env: []string{"TEST_KEY_1=TEST_VAL_1"},
@ -45,7 +45,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
Args: []string{"--some-boolflag"},
Ports: []string{"0.0.0.0:6443:6443/tcp"},
Restart: true,
Labels: map[string]string{"k3d.role": string(k3d.MasterRole), "test_key_1": "test_val_1"},
Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
}
expectedRepresentation := &NodeInDocker{
@ -54,7 +54,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
Image: "rancher/k3s:v0.9.0",
Env: []string{"TEST_KEY_1=TEST_VAL_1"},
Cmd: []string{"server", "--https-listen-port=6443", "--some-boolflag"},
Labels: map[string]string{"k3d.role": string(k3d.MasterRole), "test_key_1": "test_val_1"},
Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
ExposedPorts: nat.PortSet{},
},
HostConfig: container.HostConfig{

@ -22,10 +22,15 @@ THE SOFTWARE.
package docker
import (
"context"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// GetDefaultObjectLabelsFilter returns docker type filters created from k3d labels
@ -34,6 +39,52 @@ func GetDefaultObjectLabelsFilter(clusterName string) filters.Args {
for key, value := range k3d.DefaultObjectLabels {
filters.Add("label", fmt.Sprintf("%s=%s", key, value))
}
filters.Add("label", fmt.Sprintf("k3d.cluster=%s", clusterName))
filters.Add("label", fmt.Sprintf("%s=%s", k3d.LabelClusterName, clusterName))
return filters
}
// CopyToNode copies a file from the local FS to the selected node
func (d Docker) CopyToNode(ctx context.Context, src string, dest string, node *k3d.Node) error {
// create docker client
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
return err
}
defer docker.Close()
container, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to find container for target node '%s'", node.Name)
return err
}
// source: docker/cli/cli/command/container/cp
srcInfo, err := archive.CopyInfoSourcePath(src, false)
if err != nil {
log.Errorln("Failed to copy info source path")
return err
}
srcArchive, err := archive.TarResource(srcInfo)
if err != nil {
log.Errorln("Failed to create tar resource")
return err
}
defer srcArchive.Close()
destInfo := archive.CopyInfo{Path: dest}
destStat, _ := docker.ContainerStatPath(ctx, container.ID, dest) // don't blame me, docker is also not doing anything if err != nil ¯\_(ツ)_/¯
destInfo.Exists, destInfo.IsDir = true, destStat.Mode.IsDir()
destDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, destInfo)
if err != nil {
log.Errorln("Failed to prepare archive")
return err
}
defer preparedArchive.Close()
return docker.CopyToContainer(ctx, container.ID, destDir, preparedArchive, types.CopyToContainerOptions{AllowOverwriteDirWithFile: false})
}

@ -25,16 +25,16 @@ import (
"context"
"fmt"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
)
// CreateVolume creates a new named volume
func (d Docker) CreateVolume(name string, labels map[string]string) error {
func (d Docker) CreateVolume(ctx context.Context, name string, labels map[string]string) error {
// (0) create new docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -63,9 +63,8 @@ func (d Docker) CreateVolume(name string, labels map[string]string) error {
}
// DeleteVolume deletes a named volume
func (d Docker) DeleteVolume(name string) error {
func (d Docker) DeleteVolume(ctx context.Context, name string) error {
// (0) create new docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
@ -83,7 +82,7 @@ func (d Docker) DeleteVolume(name string) error {
// check if volume is still in use
if vol.UsageData != nil {
if vol.UsageData.RefCount > 0 {
log.Errorf("Failed to delete volume '%s'")
log.Errorf("Failed to delete volume '%s'", vol.Name)
return fmt.Errorf("Volume '%s' is still referenced by %d containers", name, vol.UsageData.RefCount)
}
}
@ -96,3 +95,28 @@ func (d Docker) DeleteVolume(name string) error {
return nil
}
// GetVolume tries to get a named volume
func (d Docker) GetVolume(name string) (string, error) {
// (0) create new docker client
ctx := context.Background()
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
return "", err
}
defer docker.Close()
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s$", name))
volumeList, err := docker.VolumeList(ctx, filters)
if err != nil {
return "", err
}
if len(volumeList.Volumes) < 1 {
return "", fmt.Errorf("Failed to find named volume '%s'", name)
}
return volumeList.Volumes[0].Name, nil
}
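The `^%s$` anchoring in GetVolume matters because Docker treats the name filter as a regular expression: without anchors, looking up "foo" would also match "foobar". A small sketch of the filter construction:

package sketch

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

// volumeNameFilter builds an exact-match name filter, as GetVolume does above.
func volumeNameFilter(name string) filters.Args {
	f := filters.NewArgs()
	f.Add("name", fmt.Sprintf("^%s$", name))
	return f
}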

@ -22,17 +22,25 @@ THE SOFTWARE.
package runtimes
import (
"context"
"fmt"
"io"
"time"
"github.com/rancher/k3d/pkg/runtimes/containerd"
"github.com/rancher/k3d/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/pkg/types"
"github.com/rancher/k3d/v3/pkg/runtimes/containerd"
"github.com/rancher/k3d/v3/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v3/pkg/types"
)
// SelectedRuntime is a runtime (pun intended) variable determining the selected runtime
var SelectedRuntime Runtime = docker.Docker{}
// Docker is the docker runtime
var Docker = docker.Docker{}
// Containerd is the containerd runtime
var Containerd = containerd.Containerd{}
// Runtimes defines a map of implemented k3d runtimes
var Runtimes = map[string]Runtime{
"docker": docker.Docker{},
@ -41,21 +49,23 @@ var Runtimes = map[string]Runtime{
// Runtime defines an interface that can be implemented for various container runtime environments (docker, containerd, etc.)
type Runtime interface {
CreateNode(*k3d.Node) error
DeleteNode(*k3d.Node) error
GetNodesByLabel(map[string]string) ([]*k3d.Node, error)
GetNode(*k3d.Node) (*k3d.Node, error)
CreateNetworkIfNotPresent(name string) (string, bool, error) // @return NETWORK_NAME, EXISTS, ERROR
GetKubeconfig(*k3d.Node) (io.ReadCloser, error)
DeleteNetwork(ID string) error
StartNode(*k3d.Node) error
StopNode(*k3d.Node) error
CreateVolume(string, map[string]string) error
DeleteVolume(string) error
CreateNode(context.Context, *k3d.Node) error
DeleteNode(context.Context, *k3d.Node) error
GetNodesByLabel(context.Context, map[string]string) ([]*k3d.Node, error)
GetNode(context.Context, *k3d.Node) (*k3d.Node, error)
CreateNetworkIfNotPresent(context.Context, string) (string, bool, error) // @return NETWORK_NAME, EXISTS, ERROR
GetKubeconfig(context.Context, *k3d.Node) (io.ReadCloser, error)
DeleteNetwork(context.Context, string) error
StartNode(context.Context, *k3d.Node) error
StopNode(context.Context, *k3d.Node) error
CreateVolume(context.Context, string, map[string]string) error
DeleteVolume(context.Context, string) error
GetVolume(string) (string, error)
GetRuntimePath() string // returns e.g. '/var/run/docker.sock' for a default docker setup
ExecInNode(*k3d.Node, []string) error
// DeleteContainer() error
GetNodeLogs(*k3d.Node) (io.ReadCloser, error)
ExecInNode(context.Context, *k3d.Node, []string) error
GetNodeLogs(context.Context, *k3d.Node, time.Time) (io.ReadCloser, error)
GetImages(context.Context) ([]string, error)
CopyToNode(context.Context, string, string, *k3d.Node) error
}
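With context.Context threaded through every method, callers can now carry timeouts and cancellation across the whole runtime layer; GetRuntime (below) resolves a name via the Runtimes map. A sketch of what such a lookup plus call could look like, with pickRuntime as a hypothetical mirror of GetRuntime:

package sketch

import (
	"context"
	"fmt"

	"github.com/rancher/k3d/v3/pkg/runtimes"
)

func pickRuntime(ctx context.Context, name string) (runtimes.Runtime, error) {
	rt, ok := runtimes.Runtimes[name]
	if !ok {
		return nil, fmt.Errorf("runtime '%s' not supported", name)
	}
	// every interface method now takes the context first, e.g.:
	// nodes, err := rt.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
	return rt, nil
}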
// GetRuntime checks if a given name is represented by an implemented k3d runtime and returns it

@ -22,20 +22,64 @@ THE SOFTWARE.
package tools
import (
"context"
"fmt"
"os"
"path"
"strings"
"sync"
"time"
k3dc "github.com/rancher/k3d/pkg/cluster"
"github.com/rancher/k3d/pkg/runtimes"
k3d "github.com/rancher/k3d/pkg/types"
k3dc "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
log "github.com/sirupsen/logrus"
)
// LoadImagesIntoCluster starts up a k3d tools container for the selected cluster and uses it to export
// ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export
// images from the runtime to import them into the nodes of the selected cluster
func LoadImagesIntoCluster(runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, keepTarball bool) error {
cluster, err := k3dc.GetCluster(cluster, runtime)
func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, loadImageOpts k3d.ImageImportOpts) error {
var imagesFromRuntime []string
var imagesFromTar []string
runtimeImages, err := runtime.GetImages(ctx)
if err != nil {
log.Errorln("Failed to fetch list of exsiting images from runtime")
return err
}
for _, image := range images {
found := false
// Check if the current element is a file
if _, err := os.Stat(image); os.IsNotExist(err) {
// not a file? Check if such an image is present in the container runtime
for _, runtimeImage := range runtimeImages {
if image == runtimeImage {
found = true
imagesFromRuntime = append(imagesFromRuntime, image)
log.Debugf("Selected image '%s' found in runtime", image)
break
}
}
} else {
// file exists
found = true
imagesFromTar = append(imagesFromTar, image)
log.Debugf("Selected image '%s' is a file", image)
}
if !found {
log.Warnf("Image '%s' is not a file and couldn't be found in the container runtime", image)
}
}
// no images found to load -> exit early
if len(imagesFromRuntime)+len(imagesFromTar) == 0 {
return fmt.Errorf("No valid images specified")
}
cluster, err = k3dc.ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find the specified cluster")
return err
@ -48,8 +92,8 @@ func LoadImagesIntoCluster(runtime runtimes.Runtime, images []string, cluster *k
var imageVolume string
var ok bool
for _, node := range cluster.Nodes {
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
if imageVolume, ok = node.Labels["k3d.cluster.imageVolume"]; ok {
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if imageVolume, ok = node.Labels[k3d.LabelImageVolume]; ok {
break
}
}
@ -61,79 +105,114 @@ func LoadImagesIntoCluster(runtime runtimes.Runtime, images []string, cluster *k
log.Debugf("Attaching to cluster's image volume '%s'", imageVolume)
// create tools node to export images
log.Infoln("Starting k3d-tools node...")
toolsNode, err := startToolsNode( // TODO: re-use existing container
runtime,
cluster,
cluster.Network.Name,
[]string{
fmt.Sprintf("%s:%s", imageVolume, k3d.DefaultImageVolumeMountPath),
fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
})
if err != nil {
log.Errorf("Failed to start tools container for cluster '%s'", cluster.Name)
var toolsNode *k3d.Node
toolsNode, err = runtime.GetNode(ctx, &k3d.Node{Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)})
if err != nil || toolsNode == nil {
log.Infoln("Starting k3d-tools node...")
toolsNode, err = startToolsNode( // TODO: re-use existing container
ctx,
runtime,
cluster,
cluster.Network.Name,
[]string{
fmt.Sprintf("%s:%s", imageVolume, k3d.DefaultImageVolumeMountPath),
fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
})
if err != nil {
log.Errorf("Failed to start tools container for cluster '%s'", cluster.Name)
}
}
// save image to tarfile in shared volume
log.Infoln("Saving images...")
tarName := fmt.Sprintf("%s/k3d-%s-images-%s.tar", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405")) // FIXME: change
if err := runtime.ExecInNode(toolsNode, append([]string{"./k3d-tools", "save-image", "-d", tarName}, images...)); err != nil {
log.Errorf("Failed to save images in tools container for cluster '%s'", cluster.Name)
return err
/* TODO:
* Loop over the list of images and check whether they are files (tar archives) and sort them accordingly
* Special case: '-' means "read from stdin"
* 1. From daemon: save images -> import
* 2. From file: copy file -> import
* 3. From stdin: save to tar -> import
* Note: temporary storage location is always the shared image volume and actions are always executed by the tools node
*/
var importTarNames []string
if len(imagesFromRuntime) > 0 {
// save image to tarfile in shared volume
log.Infof("Saving %d image(s) from runtime...", len(imagesFromRuntime))
tarName := fmt.Sprintf("%s/k3d-%s-images-%s.tar", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405"))
if err := runtime.ExecInNode(ctx, toolsNode, append([]string{"./k3d-tools", "save-image", "-d", tarName}, imagesFromRuntime...)); err != nil {
log.Errorf("Failed to save image(s) in tools container for cluster '%s'", cluster.Name)
return err
}
importTarNames = append(importTarNames, tarName)
}
if len(imagesFromTar) > 0 {
// copy tarfiles to shared volume
log.Infof("Saving %d tarball(s) to shared image volume...", len(imagesFromTar))
for _, file := range imagesFromTar {
tarName := fmt.Sprintf("%s/k3d-%s-images-%s-file-%s", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405"), path.Base(file))
if err := runtime.CopyToNode(ctx, file, tarName, toolsNode); err != nil {
log.Errorf("Failed to copy image tar '%s' to tools node! Error below:\n%+v", file, err)
continue
}
importTarNames = append(importTarNames, tarName)
}
}
// import image in each node
log.Infoln("Importing images into nodes...")
var importWaitgroup sync.WaitGroup
for _, node := range cluster.Nodes {
// only import image in master and worker nodes (i.e. ignoring auxiliary nodes like the master loadbalancer)
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
importWaitgroup.Add(1)
go func(node *k3d.Node, wg *sync.WaitGroup) {
log.Infof("Importing images into node '%s'...", node.Name)
if err := runtime.ExecInNode(node, []string{"ctr", "image", "import", tarName}); err != nil {
log.Errorf("Failed to import images in node '%s'", node.Name)
log.Errorln(err)
}
wg.Done()
}(node, &importWaitgroup)
for _, tarName := range importTarNames {
for _, node := range cluster.Nodes {
// only import image in server and agent nodes (i.e. ignoring auxiliary nodes like the server loadbalancer)
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
importWaitgroup.Add(1)
go func(node *k3d.Node, wg *sync.WaitGroup, tarPath string) {
log.Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)
if err := runtime.ExecInNode(ctx, node, []string{"ctr", "image", "import", tarPath}); err != nil {
log.Errorf("Failed to import images in node '%s'", node.Name)
log.Errorln(err)
}
wg.Done()
}(node, &importWaitgroup, tarName)
}
}
}
importWaitgroup.Wait()
// remove tarball
if !keepTarball {
log.Infoln("Removing the tarball...")
if err := runtime.ExecInNode(cluster.Nodes[0], []string{"rm", "-f", tarName}); err != nil { // TODO: do this in tools node (requires rm)
log.Errorf("Failed to delete tarball '%s'", tarName)
if !loadImageOpts.KeepTar && len(importTarNames) > 0 {
log.Infoln("Removing the tarball(s) from image volume...")
if err := runtime.ExecInNode(ctx, toolsNode, []string{"rm", "-f", strings.Join(importTarNames, " ")}); err != nil {
log.Errorf("Failed to delete one or more tarballs from '%+v'", importTarNames)
log.Errorln(err)
}
}
// delete tools container
log.Infoln("Removing k3d-tools node...")
if err := runtime.DeleteNode(toolsNode); err != nil {
log.Errorln("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name)
if err := runtime.DeleteNode(ctx, toolsNode); err != nil {
log.Errorf("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name)
}
log.Infoln("...Done")
log.Infoln("Successfully imported image(s)")
return nil
}
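End to end, the import flow is: classify the requested names into runtime images and tarballs, stage everything as tar archives in the shared image volume via the tools node, then run ctr image import in all server and agent nodes in parallel. A hedged call-site sketch, assuming the package lives at pkg/tools and using an illustrative cluster name:

package main

import (
	"context"

	"github.com/rancher/k3d/v3/pkg/runtimes"
	"github.com/rancher/k3d/v3/pkg/tools"
	k3d "github.com/rancher/k3d/v3/pkg/types"
)

func main() {
	cluster := &k3d.Cluster{Name: "demo"} // hypothetical cluster name
	err := tools.ImageImportIntoClusterMulti(
		context.Background(),
		runtimes.SelectedRuntime,
		[]string{"nginx:latest", "./images.tar"}, // one runtime image, one tarball
		cluster,
		k3d.ImageImportOpts{KeepTar: false},
	)
	if err != nil {
		panic(err)
	}
}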
// startToolsNode will start a new k3d tools container and connect it to the network of the chosen cluster
func startToolsNode(runtime runtimes.Runtime, cluster *k3d.Cluster, network string, volumes []string) (*k3d.Node, error) {
func startToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, network string, volumes []string) (*k3d.Node, error) {
node := &k3d.Node{
Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: k3d.DefaultToolsContainerImage,
Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()),
Role: k3d.NoRole,
Volumes: volumes,
Network: network,
Cmd: []string{},
Args: []string{"noop"},
Labels: k3d.DefaultObjectLabels,
}
if err := runtime.CreateNode(node); err != nil {
node.Labels[k3d.LabelClusterName] = cluster.Name
if err := runtime.CreateNode(ctx, node); err != nil {
log.Errorf("Failed to create tools container for cluster '%s'", cluster.Name)
return node, err
}

@ -38,27 +38,37 @@ const DefaultClusterNameMaxLength = 32
// DefaultK3sImageRepo specifies the default image repository for the used k3s image
const DefaultK3sImageRepo = "docker.io/rancher/k3s"
// DefaultLBImage defines the default cluster load balancer image
const DefaultLBImage = "docker.io/iwilltry42/k3d-proxy:v0.0.2"
// DefaultLBImageRepo defines the default cluster load balancer image
const DefaultLBImageRepo = "docker.io/rancher/k3d-proxy"
// DefaultToolsImageRepo defines the default image used for the tools container
const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools"
// DefaultObjectNamePrefix defines the name prefix for every object created by k3d
const DefaultObjectNamePrefix = "k3d"
// ReadyLogMessageByRole defines the log messages we wait for until a server node is considered ready
var ReadyLogMessageByRole = map[Role]string{
ServerRole: "Wrote kubeconfig",
AgentRole: "Successfully registered node",
LoadBalancerRole: "start worker processes",
}
// Role defines a k3d node role
type Role string
// existing k3d node roles
const (
MasterRole Role = "master"
WorkerRole Role = "worker"
ServerRole Role = "server"
AgentRole Role = "agent"
NoRole Role = "noRole"
LoadBalancerRole Role = "loadbalancer"
)
// NodeRoles defines the roles available for nodes
var NodeRoles = map[string]Role{
string(ServerRole): ServerRole,
string(AgentRole): AgentRole,
string(LoadBalancerRole): LoadBalancerRole,
}
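Since the map keys are just the roles' string forms, validating a user-supplied `--role` value reduces to a lookup; a small sketch (the helper name is an assumption for illustration, not a k3d function):

```go
// parseRole resolves a flag value like "agent" to its Role, rejecting unknowns.
func parseRole(s string) (Role, error) {
	if role, ok := NodeRoles[strings.ToLower(s)]; ok {
		return role, nil
	}
	return NoRole, fmt.Errorf("unknown node role '%s'", s)
}
```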
@ -67,6 +77,26 @@ var DefaultObjectLabels = map[string]string{
"app": "k3d",
}
// List of k3d technical label names
const (
LabelClusterName string = "k3d.cluster"
LabelClusterURL string = "k3d.cluster.url"
LabelClusterToken string = "k3d.cluster.token"
LabelImageVolume string = "k3d.cluster.imageVolume"
LabelNetworkExternal string = "k3d.cluster.network.external"
LabelNetwork string = "k3d.cluster.network"
LabelRole string = "k3d.role"
LabelServerAPIPort string = "k3d.server.api.port"
LabelServerAPIHost string = "k3d.server.api.host"
LabelServerAPIHostIP string = "k3d.server.api.hostIP"
)
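These labels are what lets k3d rediscover its own resources in the container runtime later on. A hedged sketch of stamping them onto a node before creation (the constants and struct fields are from this package; the helper itself is illustrative):

```go
// labelNode merges the default object labels and the cluster-scoped
// technical labels into a node's label map.
func labelNode(node *Node, cluster *Cluster) {
	if node.Labels == nil {
		node.Labels = map[string]string{}
	}
	for k, v := range DefaultObjectLabels {
		node.Labels[k] = v
	}
	node.Labels[LabelClusterName] = cluster.Name
	node.Labels[LabelRole] = string(node.Role)
}
```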
// DefaultRoleCmds maps the node roles to their respective default commands
var DefaultRoleCmds = map[Role][]string{
ServerRole: {"server"},
AgentRole: {"agent"},
}
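The role command becomes the first element of the container command line, with extra K3s flags appended after it; a sketch of the idea (the helper is illustrative, not the exact wiring):

```go
// buildServerCmd is an illustrative helper, not a k3d function.
func buildServerCmd(extraArgs []string) []string {
	cmd := append([]string{}, DefaultRoleCmds[ServerRole]...) // ["server"]
	return append(cmd, extraArgs...)                          // e.g. ["server", "--cluster-init"]
}
```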
// DefaultTmpfsMounts specifies tmpfs mounts that are required for all k3d nodes
var DefaultTmpfsMounts = []string{
"/run",
@ -78,9 +108,6 @@ var DefaultNodeEnv = []string{
"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml",
}
// DefaultImageVolumeMountPath defines the mount path inside k3d nodes where we will mount the shared image volume by default
const DefaultImageVolumeMountPath = "/k3d/images"
@ -96,16 +123,44 @@ const DefaultAPIPort = "6443"
// DefaultAPIHost defines the default host (IP) for the Kubernetes API
const DefaultAPIHost = "0.0.0.0"
// DoNotCopyServerFlags defines a list of commands/args that shouldn't be copied from an existing node when adding a similar node to a cluster
var DoNotCopyServerFlags = []string{
"--cluster-init",
}
// ClusterCreateOpts describes a set of options one can set when creating a cluster
type ClusterCreateOpts struct {
DisableImageVolume bool
WaitForServer bool
Timeout time.Duration
DisableLoadBalancer bool
K3sServerArgs []string
K3sAgentArgs []string
}
// ClusterStartOpts describes a set of options one can set when (re-)starting a cluster
type ClusterStartOpts struct {
WaitForServer bool
Timeout time.Duration
}
// NodeCreateOpts describes a set of options one can set when creating a new node
type NodeCreateOpts struct {
Wait bool
Timeout time.Duration
}
// NodeStartOpts describes a set of options one can set when (re-)starting a node
type NodeStartOpts struct {
Wait bool
Timeout time.Duration
}
// ImageImportOpts describes a set of options one can set for loading image(s) into cluster(s)
type ImageImportOpts struct {
KeepTar bool
}
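These option structs are what the CLI layer fills in from its flags. A minimal sketch of assembling a cluster spec from them (the creation entry point itself is not shown in this hunk, so only the spec is built; assumes a `time` import):

```go
// newDemoCluster sketches how the CLI might assemble a cluster spec from flags.
func newDemoCluster() *Cluster {
	return &Cluster{
		Name: "demo",
		CreateClusterOpts: &ClusterCreateOpts{
			WaitForServer: true,
			Timeout:       60 * time.Second,
			K3sServerArgs: []string{"--no-deploy=traefik"}, // example flag, passed through to k3s
		},
	}
}
```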
// ClusterNetwork describes a network which a cluster is running in
type ClusterNetwork struct {
Name string `yaml:"name" json:"name,omitempty"`
@ -116,16 +171,48 @@ type ClusterNetwork struct {
type Cluster struct {
Name string `yaml:"name" json:"name,omitempty"`
Network ClusterNetwork `yaml:"network" json:"network,omitempty"`
Token string `yaml:"cluster_token" json:"clusterToken,omitempty"`
Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
InitNode *Node // init server node
ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
CreateClusterOpts *ClusterCreateOpts `yaml:"options" json:"options,omitempty"`
ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
ServerLoadBalancer *Node `yaml:"server_loadbalancer" json:"serverLoadBalancer,omitempty"`
ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
}
// ServerCount returns the number of server nodes in the cluster
func (c *Cluster) ServerCount() int {
serverCount := 0
for _, node := range c.Nodes {
if node.Role == ServerRole {
serverCount++
}
}
return serverCount
}
// AgentCount returns the number of agent nodes in the cluster
func (c *Cluster) AgentCount() int {
agentCount := 0
for _, node := range c.Nodes {
if node.Role == AgentRole {
agentCount++
}
}
return agentCount
}
// HasLoadBalancer returns true if the cluster has a loadbalancer node
func (c *Cluster) HasLoadBalancer() bool {
for _, node := range c.Nodes {
if node.Role == LoadBalancerRole {
return true
}
}
return false
}
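A short usage sketch of the three helpers above, using only identifiers defined in this package (assumes an `fmt` import):

```go
// describeCluster summarizes a cluster's topology using the helpers above.
func describeCluster(c *Cluster) string {
	return fmt.Sprintf("cluster '%s': %d server(s), %d agent(s), loadbalancer: %t",
		c.Name, c.ServerCount(), c.AgentCount(), c.HasLoadBalancer())
}
```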
// Node describes a k3d node
type Node struct {
Name string `yaml:"name" json:"name,omitempty"`
@ -139,17 +226,17 @@ type Node struct {
Restart bool `yaml:"restart" json:"restart,omitempty"`
Labels map[string]string // filled automatically
Network string // filled automatically
ServerOpts ServerOpts `yaml:"server_opts" json:"serverOpts,omitempty"`
AgentOpts AgentOpts `yaml:"agent_opts" json:"agentOpts,omitempty"`
}
// ServerOpts describes some additional server role specific opts
type ServerOpts struct {
IsInit bool `yaml:"is_initializing_server" json:"isInitializingServer,omitempty"`
ExposeAPI ExposeAPI // filled automatically
}
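Putting the type together with the defaults from earlier, an initializing server node could be assembled roughly like this (all literal values are illustrative; only the identifiers come from this package):

```go
// newInitServerNode sketches an initializing server node.
func newInitServerNode() *Node {
	return &Node{
		Name:       GetDefaultObjectName("demo-server-0"),
		Image:      fmt.Sprintf("%s:%s", DefaultK3sImageRepo, "v1.18.6-k3s1"), // tag chosen for illustration
		Role:       ServerRole,
		Cmd:        DefaultRoleCmds[ServerRole],
		Args:       []string{"--cluster-init"},
		ServerOpts: ServerOpts{IsInit: true},
	}
}
```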
// ExternalDatastore describes an external datastore used for HA/multi-server clusters
type ExternalDatastore struct {
Endpoint string `yaml:"endpoint" json:"endpoint,omitempty"`
CAFile string `yaml:"ca_file" json:"caFile,omitempty"`
@ -165,8 +252,8 @@ type ExposeAPI struct {
Port string `yaml:"port" json:"port"`
}
// AgentOpts describes some additional agent role specific opts
type AgentOpts struct{}
// GetDefaultObjectName prefixes the passed name with the default prefix
func GetDefaultObjectName(name string) string {

@ -43,7 +43,7 @@ func GetConfigDirOrCreate() (string, error) {
// create directories if necessary
if err := createDirIfNotExists(configDir); err != nil {
log.Errorf("Failed to create config path '%s'", configDir)
return "", err
}

@ -37,7 +37,7 @@ const (
var src = rand.NewSource(time.Now().UnixNano())
// GenerateRandomString thanks to https://stackoverflow.com/a/31832326/6450189
// GenerateRandomString is used to generate a random string that is used as a cluster token
func GenerateRandomString(n int) string {
sb := strings.Builder{}
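The hunk cuts the body off here. For reference, the StackOverflow answer cited above completes it by drawing 63 random bits at a time from `src` and slicing them into 6-bit alphabet indices; the following is a reconstruction of that technique, not necessarily byte-for-byte what this file contains:

```go
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // all 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // letter indices fitting in 63 bits
)

func generateRandomString(n int) string {
	sb := strings.Builder{}
	sb.Grow(n)
	// src.Int63() yields 63 random bits, enough for letterIdxMax characters.
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			sb.WriteByte(letterBytes[idx])
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return sb.String()
}
```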

@ -1,8 +1,12 @@
FROM nginx:1.16.0-alpine
ARG CONFD_REPO=iwilltry42/confd
ARG CONFD_VERSION=0.16.1
ARG OS=linux
ARG ARCH=amd64
RUN echo "Building for '${OS}/${ARCH}'..." \
&& apk -U --no-cache add curl ca-certificates\
&& mkdir -p /etc/confd \
&& curl -sLf "https://github.com/${CONFD_REPO}/releases/download/v${CONFD_VERSION}/confd-${CONFD_VERSION}-${OS}-${ARCH}" > /usr/bin/confd \
&& chmod +x /usr/bin/confd \
&& apk del curl

@ -12,7 +12,7 @@ events {
stream {
{{- range $port := $ports }}
upstream server_nodes_{{ $port }} {
{{- range $server := $servers }}
server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s;
{{- end }}
@ -20,8 +20,8 @@ stream {
server {
listen {{ $port }};
proxy_pass server_nodes_{{ $port }};
proxy_timeout 600;
proxy_connect_timeout 2s;
}
{{- end }}

@ -61,7 +61,7 @@ check_url() {
check_clusters() {
[ -n "$EXE" ] || abort "EXE is not defined"
for c in "$@" ; do
$EXE get kubeconfig "$c" --switch
$EXE kubeconfig merge "$c" --switch-context
if kubectl cluster-info ; then
passed "cluster $c is reachable"
else
@ -73,17 +73,27 @@ check_clusters() {
return 0
}
check_cluster_count() {
expectedClusterCount=$1
actualClusterCount=$($EXE cluster list --no-headers | wc -l)
if [[ $actualClusterCount != $expectedClusterCount ]]; then
failed "incorrect number of clusters available: $actualClusterCount != $expectedClusterCount"
return 1
fi
return 0
}
# check_multi_node verifies that a cluster runs with an expected number of nodes
check_multi_node() {
cluster=$1
expectedNodeCount=$2
$EXE get kubeconfig "$cluster" --switch
$EXE kubeconfig merge "$cluster" --switch-context
nodeCount=$(kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers | wc -l)
if [[ $nodeCount == $expectedNodeCount ]]; then
passed "cluster $cluster has $expectedNodeCount nodes, as expected"
else
warn "cluster $cluster has incorrect number of nodes: $nodeCount != $expectedNodeCount"
kubectl get nodes
docker ps -a
return 1
fi
@ -97,3 +107,8 @@ check_registry() {
check_volume_exists() {
docker volume inspect "$1" >/dev/null 2>&1
}
check_cluster_token_exist() {
[ -n "$EXE" ] || abort "EXE is not defined"
$EXE cluster get "$1" --token | grep "TOKEN" >/dev/null 2>&1
}

@ -6,23 +6,24 @@ K3D_IMAGE_TAG=$1
# define E2E_KEEP to non-empty for keeping the e2e runner container after running the tests
E2E_KEEP=${E2E_KEEP:-}
####################################################################################
# Max. time to wait for the runner container to be up
RUNNER_START_TIMEOUT=${E2E_RUNNER_START_TIMEOUT:-10}
TIMESTAMP=$(date "+%m%d%H%M%S")
####################################################################################
# Start the runner container
TIMESTAMP=$(date "+%y%m%d%H%M%S")
k3de2e=$(docker run -d \
-v "$(pwd)"/tests:/tests \
-v "$(pwd):/src" \
--privileged \
-e EXE="$K3D_EXE" \
-e CI="true" \
-e LOG_LEVEL="$LOG_LEVEL" \
-e E2E_SKIP="$E2E_SKIP" \
--name "k3d-e2e-runner-$TIMESTAMP" \
"k3d:$K3D_IMAGE_TAG")
# setup exit trap (make sure that we always stop and remove the runner container)
finish() {
docker stop "$k3de2e" || /bin/true
if [ -z "$E2E_KEEP" ] ; then
@ -31,4 +32,19 @@ finish() {
}
trap finish EXIT
docker exec "$k3de2e" /tests/runner.sh
# wait for the runner container to be up or exit early
TIMEOUT=0
until docker inspect "$k3de2e" | jq ".[0].State.Running" && docker logs "$k3de2e" 2>&1 | grep -i "API listen on /var/run/docker.sock"; do
if [[ $TIMEOUT -eq $RUNNER_START_TIMEOUT ]]; then
echo "Failed to start E2E Runner Container in $RUNNER_START_TIMEOUT seconds"
exit 1
fi
sleep 1
(( TIMEOUT++ ))
done
# build helper container images
docker exec --workdir /src "$k3de2e" make build-helper-images
# execute tests
docker exec "$k3de2e" /src/tests/runner.sh

@ -14,7 +14,7 @@ source "$CURR_DIR/common.sh"
info "Preparing filesystem and environment..."
mkdir -p $HOME/.kube
for i in $CURR_DIR/test_*.sh ; do
base=$(basename $i .sh)

@ -7,15 +7,22 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source "$CURR_DIR/common.sh"
info "Creating two clusters..."
$EXE cluster create c1 --wait --timeout 60s --api-port 6443 || failed "could not create cluster c1"
$EXE cluster create c2 --wait --timeout 60s || failed "could not create cluster c2"
info "Checking that we can get both clusters..."
check_cluster_count 2
info "Checking we have access to both clusters..."
check_clusters "c1" "c2" || failed "error checking cluster"
info "Check k3s token retrieval"
check_cluster_token_exist "c1" || failed "could not find cluster token c1"
check_cluster_token_exist "c2" || failed "could not find cluster token c2"
info "Deleting clusters..."
$EXE cluster delete c1 || failed "could not delete the cluster c1"
$EXE cluster delete c2 || failed "could not delete the cluster c2"
exit 0

@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh"
clustername="lifecycletest"
info "Creating cluster $clustername..."
$EXE create cluster "$clustername" --workers 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"
$EXE cluster create "$clustername" --agents 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
@ -23,17 +23,14 @@ check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"
# 2. stop the cluster
info "Stopping cluster..."
$EXE stop cluster "$clustername"
$EXE cluster stop "$clustername"
info "Checking that cluster was stopped"
check_clusters "$clustername" && failed "cluster was not stopped, since we still have access"
# 3. start the cluster
info "Starting cluster..."
$EXE start cluster "$clustername"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
$EXE cluster start "$clustername" --wait --timeout 360s || failed "cluster didn't come back in time"
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"
@ -41,16 +38,23 @@ check_clusters "$clustername" || failed "error checking cluster"
info "Checking that we have 2 nodes online..."
check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"
# 4. adding another agent node
info "Adding one agent node..."
$EXE node create "extra-agent" --cluster "$clustername" --role "agent" --wait --timeout 360s || failed "failed to add agent node"
info "Checking that we have 3 nodes available now..."
check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"
# 5. load an image into the cluster
info "Loading an image into the cluster..."
docker pull nginx:latest > /dev/null
docker tag nginx:latest nginx:local > /dev/null
$EXE image import nginx:local -c $clustername || failed "could not import image in $clustername"
# Cleanup
info "Deleting cluster $clustername..."
$EXE delete cluster "$clustername" || failed "could not delete the cluster $clustername"
$EXE cluster delete "$clustername" || failed "could not delete the cluster $clustername"
exit 0

@ -6,20 +6,20 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
info "Creating cluster multimaster..."
$EXE create cluster "multimaster" --masters 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multimaster"
info "Creating cluster multiserver..."
$EXE cluster create "multiserver" --servers 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multiserver"
info "Checking that we have access to the cluster..."
check_clusters "multimaster" || failed "error checking cluster"
check_clusters "multiserver" || failed "error checking cluster"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
info "Checking that we have 3 master nodes online..."
check_multi_node "multimaster" 3 || failed "failed to verify number of nodes"
info "Checking that we have 3 server nodes online..."
check_multi_node "multiserver" 3 || failed "failed to verify number of nodes"
info "Deleting cluster multimaster..."
$EXE delete cluster "multimaster" || failed "could not delete the cluster multimaster"
info "Deleting cluster multiserver..."
$EXE cluster delete "multiserver" || failed "could not delete the cluster multiserver"
exit 0

@ -36,10 +36,10 @@ Here's how k3d types should translate to a runtime type:
## Node Configuration
- server node(s)
- ENV
- `K3S_CLUSTER_INIT`
- if num_servers > 1 && no external datastore configured
- `K3S_KUBECONFIG_OUTPUT`
- k3d default -> `/output/kubeconfig.yaml`
- CMD/ARGS
@ -65,9 +65,9 @@ Here's how k3d types should translate to a runtime type:
- `privileged`
- Network
- cluster network or external/inherited
- agent nodes
- ENV
- `K3S_URL` to connect to server node
- server hostname + port (6443)
- cluster-specific or inherited
- CMD/ARGS
@ -81,23 +81,23 @@ Here's how k3d types should translate to a runtime type:
- `--port [host:]port[:containerPort][/protocol][@group_identifier[[index] | @node_identifier]`
- Examples:
- `--port 0.0.0.0:8080:8081/tcp@agents` -> whole group
- `--port 80@agents[0]` -> single instance of group by list index
- `--port 80@agents[0,2-3]` -> multiple instances of a group by index lists and ranges
- `--port 80@k3d-test-agent-0` -> single instance by specific node identifier
- `--port 80@k3d-test-server-0@agents[1-5]` -> multiple instances by combination of node and group identifiers
- analogous for volumes
## [WIP] Multi-Server Setup
- to make this possible, we always deploy a load-balancer (nginx) in front of the server nodes as an extra container
- consider that in the kubeconfig file and `--tls-san`
### Variants
- [x] embedded datastore (dqlite)
- if `--servers` > 1 deploy a load-balancer in front of them as an extra container
- [ ] external datastore
## [DONE] Keep State in Docker Labels

@ -0,0 +1,13 @@
FROM golang:1.14 as builder
ARG GIT_TAG
WORKDIR /app
COPY . .
ENV GIT_TAG=${GIT_TAG}
ENV GO111MODULE=on
ENV CGO_ENABLED=0
RUN make build
FROM busybox:1.31
WORKDIR /app
COPY --from=builder /app/bin/k3d-tools .
ENTRYPOINT [ "/app/k3d-tools"]

