Merge pull request #12 from rancher/use-docker-sdk

Replace docker commands with docker sdk
Thorsten Klein 5 years ago committed by GitHub
commit 705ef69dd9
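At its core, every `exec.Command("docker", ...)` call in the CLI is replaced by the vendored Docker Go SDK (github.com/docker/docker/client). A minimal, standalone sketch of the new pattern, mirroring the CheckTools change further down (illustrative only, not part of the commit):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	// NewEnvClient configures the client from DOCKER_HOST, DOCKER_API_VERSION and
	// DOCKER_CERT_PATH, the same environment the docker CLI reads.
	docker, err := client.NewEnvClient()
	if err != nil {
		log.Fatalf("couldn't create docker client\n%+v", err)
	}
	// Ping replaces the former `docker version` shell-out as the health check.
	ping, err := docker.Ping(ctx)
	if err != nil {
		log.Fatalf("checking docker failed\n%+v", err)
	}
	fmt.Printf("SUCCESS: docker is reachable (API: v%s)\n", ping.APIVersion)
}

Changed files: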
  1. Makefile (5 changed lines)
  2. README.md (7 changed lines)
  3. cli/commands.go (338 changed lines)
  4. cli/config.go (122 changed lines)
  5. cli/container.go (175 changed lines)
  6. cli/network.go (59 changed lines)
  7. cli/run.go (18 changed lines)
  8. cli/util.go (37 changed lines)
  9. go.mod (2 changed lines)
  10. main.go (16 changed lines)
  11. vendor/modules.txt (16 changed lines)
  12. version/version.go (8 changed lines)

@@ -10,13 +10,16 @@ ifeq ($(GIT_TAG),)
GIT_TAG := $(shell git describe --always)
endif
# get latest k3s version
K3S_TAG := $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
# Go options
GO ?= go
PKG := $(shell go mod vendor)
TAGS :=
TESTS := .
TESTFLAGS :=
LDFLAGS := -w -s -X github.com/rancher/k3d/version.Version=${GIT_TAG}
LDFLAGS := -w -s -X github.com/rancher/k3d/version.Version=${GIT_TAG} -X github.com/rancher/k3d/version.K3sVersion=${K3S_TAG}
GOFLAGS :=
BINDIR := $(CURDIR)/bin
BINARIES := k3d

@@ -46,8 +46,7 @@ Example Workflow: Create a new cluster and use it with `kubectl`
## TODO
- [ ] Use the docker client library instead of commands
- [ ] Test the docker version
- [ ] Improve cluster state management
- [x] Use the docker client library instead of commands
- [x] Improve cluster state management
- [x] Add install script
- [ ] Use [sirupsen/logrus](https://github.com/sirupsen/logrus) for prettier logs
- [ ] Add install script

@@ -1,80 +1,91 @@
package run
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/urfave/cli"
)
// CheckTools checks if the installed tools work correctly
func CheckTools(c *cli.Context) error {
log.Print("Checking docker...")
cmd := "docker"
args := []string{"version"}
if err := runCommand(true, cmd, args...); err != nil {
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return err
}
ping, err := docker.Ping(ctx)
if err != nil {
return fmt.Errorf("ERROR: checking docker failed\n%+v", err)
}
log.Println("SUCCESS: Checking docker succeeded")
log.Printf("SUCCESS: Checking docker succeeded (API: v%s)\n", ping.APIVersion)
return nil
}
// CreateCluster creates a new single-node cluster container and initializes the cluster directory
func CreateCluster(c *cli.Context) error {
if c.IsSet("timeout") && !c.IsSet("wait") {
return errors.New("Cannot use --timeout flag without --wait flag")
}
port := fmt.Sprintf("%s:%s", c.String("port"), c.String("port"))
image := fmt.Sprintf("rancher/k3s:%s", c.String("version"))
cmd := "docker"
// create cluster network
networkID, err := createClusterNetwork(c.String("name"))
if err != nil {
return err
}
log.Printf("Created cluster network with ID %s", networkID)
// default docker arguments
args := []string{
"run",
"--name", c.String("name"),
"--publish", port,
"--privileged",
"--detach",
"--env", "K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml",
if c.IsSet("timeout") && !c.IsSet("wait") {
return errors.New("Cannot use --timeout flag without --wait flag")
}
// additional docker arguments
extraArgs := []string{}
// environment variables
env := []string{"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml"}
if c.IsSet("env") || c.IsSet("e") {
for _, env := range c.StringSlice("env") {
extraArgs = append(extraArgs, "--env", env)
}
}
if c.IsSet("volume") {
extraArgs = append(extraArgs, "--volume", c.String("volume"))
env = append(env, c.StringSlice("env")...)
}
if len(extraArgs) > 0 {
args = append(args, extraArgs...)
k3sClusterSecret := ""
if c.Int("workers") > 0 {
k3sClusterSecret = fmt.Sprintf("K3S_CLUSTER_SECRET=%s", GenerateRandomString(20))
env = append(env, k3sClusterSecret)
}
// k3s version and options
args = append(args,
image,
"server", // cmd
"--https-listen-port", c.String("port"), //args
)
// additional k3s server arguments
// k3s server arguments
k3sServerArgs := []string{"--https-listen-port", c.String("port")}
if c.IsSet("server-arg") || c.IsSet("x") {
args = append(args, c.StringSlice("server-arg")...)
k3sServerArgs = append(k3sServerArgs, c.StringSlice("server-arg")...)
}
// let's go
log.Printf("Creating cluster [%s]", c.String("name"))
if err := runCommand(true, cmd, args...); err != nil {
return fmt.Errorf("ERROR: couldn't create cluster [%s]\n%+v", c.String("name"), err)
dockerID, err := createServer(
c.GlobalBool("verbose"),
fmt.Sprintf("docker.io/rancher/k3s:%s", c.String("version")),
c.String("port"),
k3sServerArgs,
env,
c.String("name"),
strings.Split(c.String("volume"), ","),
)
if err != nil {
log.Fatalf("ERROR: failed to create cluster\n%+v", err)
}
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return err
}
// wait for k3s to be up and running if we want it
@@ -88,17 +99,17 @@ func CreateCluster(c *cli.Context) error {
}
return errors.New("Cluster creation exceeded specified timeout")
}
cmd := "docker"
args = []string{
"logs",
c.String("name"),
}
prog := exec.Command(cmd, args...)
output, err := prog.CombinedOutput()
out, err := docker.ContainerLogs(ctx, dockerID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
if err != nil {
return err
out.Close()
return fmt.Errorf("ERROR: couldn't get docker logs for %s\n%+v", c.String("name"), err)
}
if strings.Contains(string(output), "Running kubelet") {
buf := new(bytes.Buffer)
nRead, _ := buf.ReadFrom(out)
out.Close()
output := buf.String()
if nRead > 0 && strings.Contains(string(output), "Running kubelet") {
break
}
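Because the hunk above interleaves the removed exec-based log check with its SDK replacement, the waiting logic is easier to read in isolation. A condensed, hypothetical helper (name and timeout handling are illustrative, not part of this commit):

package run

import (
	"bytes"
	"context"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// waitForKubelet (hypothetical) polls a container's logs until k3s reports
// "Running kubelet" or the timeout expires.
func waitForKubelet(containerID string, timeout time.Duration) (bool, error) {
	ctx := context.Background()
	docker, err := client.NewEnvClient()
	if err != nil {
		return false, err
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := docker.ContainerLogs(ctx, containerID,
			types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
		if err != nil {
			return false, err
		}
		buf := new(bytes.Buffer)
		buf.ReadFrom(out) // an empty or partial read simply leads to another poll
		out.Close()
		if strings.Contains(buf.String(), "Running kubelet") {
			return true, nil
		}
		time.Sleep(time.Second)
	}
	return false, nil
}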
@@ -106,48 +117,85 @@ func CreateCluster(c *cli.Context) error {
}
createClusterDir(c.String("name"))
// worker nodes
if c.Int("workers") > 0 {
k3sWorkerArgs := []string{}
env := []string{k3sClusterSecret}
log.Printf("Booting %s workers for cluster %s", strconv.Itoa(c.Int("workers")), c.String("name"))
for i := 0; i < c.Int("workers"); i++ {
workerID, err := createWorker(
c.GlobalBool("verbose"),
fmt.Sprintf("docker.io/rancher/k3s:%s", c.String("version")),
k3sWorkerArgs,
env,
c.String("name"),
strings.Split(c.String("volume"), ","),
strconv.Itoa(i),
c.String("port"),
)
if err != nil {
return fmt.Errorf("ERROR: failed to create worker node for cluster %s\n%+v", c.String("name"), err)
}
fmt.Printf("Created worker with ID %s\n", workerID)
}
}
log.Printf("SUCCESS: created cluster [%s]", c.String("name"))
log.Printf(`You can now use the cluster with:
export KUBECONFIG="$(%s get-kubeconfig --name='%s')"
kubectl cluster-info`, os.Args[0], c.String("name"))
return nil
}
// DeleteCluster removes the cluster container and its cluster directory
func DeleteCluster(c *cli.Context) error {
cmd := "docker"
args := []string{"rm"}
clusters := []string{}
// operate on one or all clusters
clusters := make(map[string]cluster)
if !c.Bool("all") {
clusters = append(clusters, c.String("name"))
cluster, err := getCluster(c.String("name"))
if err != nil {
return err
}
clusters[c.String("name")] = cluster
} else {
clusterList, err := getClusterNames()
clusterMap, err := getClusters()
if err != nil {
return fmt.Errorf("ERROR: `--all` specified, but no clusters were found\n%+v", err)
}
clusters = append(clusters, clusterList...)
// copy clusterMap
for k, v := range clusterMap {
clusters[k] = v
}
}
// remove clusters one by one instead of appending all names to the docker command
// this allows for more granular error handling and logging
for _, cluster := range clusters {
log.Printf("Removing cluster [%s]", cluster)
args = append(args, cluster)
if err := runCommand(true, cmd, args...); err != nil {
log.Printf("WARNING: couldn't delete cluster [%s], trying a force remove now.", cluster)
args = args[:len(args)-1] // pop last element from list (name of cluster)
args = append(args, "-f", cluster)
if err := runCommand(true, cmd, args...); err != nil {
log.Printf("FAILURE: couldn't delete cluster [%s] -> %+v", cluster, err)
log.Printf("Removing cluster [%s]", cluster.name)
if len(cluster.workers) > 0 {
log.Printf("...Removing %d workers\n", len(cluster.workers))
for _, worker := range cluster.workers {
if err := removeContainer(worker.ID); err != nil {
log.Println(err)
continue
}
}
args = args[:len(args)-1] // pop last element from list (-f flag)
}
deleteClusterDir(cluster)
log.Printf("SUCCESS: removed cluster [%s]", cluster)
args = args[:len(args)-1] // pop last element from list (name of last cluster)
log.Println("...Removing server")
deleteClusterDir(cluster.name)
if err := removeContainer(cluster.server.ID); err != nil {
return fmt.Errorf("ERROR: Couldn't remove server for cluster %s\n%+v", cluster.name, err)
}
if err := deleteClusterNetwork(cluster.name); err != nil {
log.Printf("WARNING: couldn't delete cluster network for cluster %s\n%+v", cluster.name, err)
}
log.Printf("SUCCESS: removed cluster [%s]", cluster.name)
}
return nil
@@ -155,31 +203,51 @@ func DeleteCluster(c *cli.Context) error {
// StopCluster stops a running cluster container (restartable)
func StopCluster(c *cli.Context) error {
cmd := "docker"
args := []string{"stop"}
clusters := []string{}
// operate on one or all clusters
clusters := make(map[string]cluster)
if !c.Bool("all") {
clusters = append(clusters, c.String("name"))
cluster, err := getCluster(c.String("name"))
if err != nil {
return err
}
clusters[c.String("name")] = cluster
} else {
clusterList, err := getClusterNames()
clusterMap, err := getClusters()
if err != nil {
return fmt.Errorf("ERROR: `--all` specified, but no clusters were found\n%+v", err)
}
clusters = append(clusters, clusterList...)
// copy clusterMap
for k, v := range clusterMap {
clusters[k] = v
}
}
// stop clusters one by one instead of appending all names to the docker command
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
// remove clusters one by one instead of appending all names to the docker command
// this allows for more granular error handling and logging
for _, cluster := range clusters {
log.Printf("Stopping cluster [%s]", cluster)
args = append(args, cluster)
if err := runCommand(true, cmd, args...); err != nil {
log.Printf("FAILURE: couldn't stop cluster [%s] -> %+v", cluster, err)
log.Printf("Stopping cluster [%s]", cluster.name)
if len(cluster.workers) > 0 {
log.Printf("...Stopping %d workers\n", len(cluster.workers))
for _, worker := range cluster.workers {
if err := docker.ContainerStop(ctx, worker.ID, nil); err != nil {
log.Println(err)
continue
}
}
}
log.Println("...Stopping server")
if err := docker.ContainerStop(ctx, cluster.server.ID, nil); err != nil {
return fmt.Errorf("ERROR: Couldn't stop server for cluster %s\n%+v", cluster.name, err)
}
log.Printf("SUCCESS: stopped cluster [%s]", cluster)
args = args[:len(args)-1] // pop last element from list (name of last cluster)
log.Printf("SUCCESS: Stopped cluster [%s]", cluster.name)
}
return nil
@@ -187,31 +255,52 @@ func StopCluster(c *cli.Context) error {
// StartCluster starts a stopped cluster container
func StartCluster(c *cli.Context) error {
cmd := "docker"
args := []string{"start"}
clusters := []string{}
// operate on one or all clusters
clusters := make(map[string]cluster)
if !c.Bool("all") {
clusters = append(clusters, c.String("name"))
cluster, err := getCluster(c.String("name"))
if err != nil {
return err
}
clusters[c.String("name")] = cluster
} else {
clusterList, err := getClusterNames()
clusterMap, err := getClusters()
if err != nil {
return fmt.Errorf("ERROR: `--all` specified, but no clusters were found\n%+v", err)
}
clusters = append(clusters, clusterList...)
// copy clusterMap
for k, v := range clusterMap {
clusters[k] = v
}
}
// start clusters one by one instead of appending all names to the docker command
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
// remove clusters one by one instead of appending all names to the docker command
// this allows for more granular error handling and logging
for _, cluster := range clusters {
log.Printf("Starting cluster [%s]", cluster)
args = append(args, cluster)
if err := runCommand(true, cmd, args...); err != nil {
log.Printf("FAILURE: couldn't start cluster [%s] -> %+v", cluster, err)
log.Printf("Starting cluster [%s]", cluster.name)
log.Println("...Starting server")
if err := docker.ContainerStart(ctx, cluster.server.ID, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("ERROR: Couldn't start server for cluster %s\n%+v", cluster.name, err)
}
if len(cluster.workers) > 0 {
log.Printf("...Starting %d workers\n", len(cluster.workers))
for _, worker := range cluster.workers {
if err := docker.ContainerStart(ctx, worker.ID, types.ContainerStartOptions{}); err != nil {
log.Println(err)
continue
}
}
}
log.Printf("SUCCESS: started cluster [%s]", cluster)
args = args[:len(args)-1] // pop last element from list (name of last cluster)
log.Printf("SUCCESS: Started cluster [%s]", cluster.name)
}
return nil
@@ -225,13 +314,56 @@ func ListClusters(c *cli.Context) error {
// GetKubeConfig grabs the kubeconfig from the running cluster and prints the path to stdout
func GetKubeConfig(c *cli.Context) error {
sourcePath := fmt.Sprintf("%s:/output/kubeconfig.yaml", c.String("name"))
destPath, _ := getClusterDir(c.String("name"))
cmd := "docker"
args := []string{"cp", sourcePath, destPath}
if err := runCommand(false, cmd, args...); err != nil {
return fmt.Errorf("ERROR: Couldn't get kubeconfig for cluster [%s]\n%+v", c.String("name"), err)
}
fmt.Printf("%s\n", path.Join(destPath, "kubeconfig.yaml"))
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("label", "app=k3d")
filters.Add("label", fmt.Sprintf("cluster=%s", c.String("name")))
filters.Add("label", "component=server")
server, err := docker.ContainerList(ctx, types.ContainerListOptions{
Filters: filters,
})
if err != nil {
return fmt.Errorf("Couldn't get server container for cluster %s\n%+v", c.String("name"), err)
}
// get kubeconfig file from container and read contents
reader, _, err := docker.CopyFromContainer(ctx, server[0].ID, "/output/kubeconfig.yaml")
if err != nil {
return fmt.Errorf("ERROR: couldn't copy kubeconfig.yaml from server container %s\n%+v", server[0].ID, err)
}
defer reader.Close()
readBytes, err := ioutil.ReadAll(reader)
if err != nil {
return fmt.Errorf("ERROR: couldn't read kubeconfig from container\n%+v", err)
}
// create destination kubeconfig file
clusterDir, err := getClusterDir(c.String("name"))
destPath := fmt.Sprintf("%s/kubeconfig.yaml", clusterDir)
if err != nil {
return err
}
kubeconfigfile, err := os.Create(destPath)
if err != nil {
return fmt.Errorf("ERROR: couldn't create kubeconfig.yaml in %s\n%+v", clusterDir, err)
}
defer kubeconfigfile.Close()
// write to file, skipping the first 512 bytes which contain file metadata and trimming any NULL characters
_, err = kubeconfigfile.Write(bytes.Trim(readBytes[512:], "\x00"))
if err != nil {
return fmt.Errorf("ERROR: couldn't write to kubeconfig.yaml\n%+v", err)
}
// output kubeconfig file path to stdout
fmt.Println(destPath)
return nil
}
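The 512-byte skip works because CopyFromContainer returns a tar stream whose first and only entry here is the kubeconfig, and a tar header block is exactly 512 bytes. A hypothetical alternative reads the archive explicitly with archive/tar instead of trimming bytes by hand:

package run

import (
	"archive/tar"
	"context"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/client"
)

// copyKubeConfig is a hypothetical helper: it extracts /output/kubeconfig.yaml
// from the server container's tar stream and writes it to destPath.
func copyKubeConfig(containerID, destPath string) error {
	ctx := context.Background()
	docker, err := client.NewEnvClient()
	if err != nil {
		return err
	}
	reader, _, err := docker.CopyFromContainer(ctx, containerID, "/output/kubeconfig.yaml")
	if err != nil {
		return err
	}
	defer reader.Close()

	// CopyFromContainer always hands back a tar archive; advance to the first
	// entry instead of skipping a fixed 512-byte header.
	tr := tar.NewReader(reader)
	if _, err := tr.Next(); err != nil {
		return fmt.Errorf("couldn't read tar entry: %+v", err)
	}
	out, err := os.Create(destPath)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, tr)
	return err
}

Either way the result is the same file; the tar reader just avoids relying on the fixed header size and the trailing NUL padding.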

@@ -2,20 +2,27 @@ package run
import (
"context"
"io/ioutil"
"fmt"
"log"
"os"
"path"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
dockerClient "github.com/docker/docker/client"
"github.com/mitchellh/go-homedir"
"github.com/olekukonko/tablewriter"
)
type cluster struct {
name string
image string
status string
name string
image string
status string
serverPorts []string
server types.Container
workers []types.Container
}
// createDirIfNotExists checks for the existence of a directory and creates it along with all required parents if not.
@@ -56,21 +63,27 @@ func getClusterDir(name string) (string, error) {
// printClusters prints the names of existing clusters
func printClusters(all bool) {
clusterNames, err := getClusterNames()
clusters, err := getClusters()
if err != nil {
log.Fatalf("ERROR: Couldn't list clusters -> %+v", err)
log.Fatalf("ERROR: Couldn't list clusters\n%+v", err)
}
if len(clusterNames) == 0 {
if len(clusters) == 0 {
log.Printf("No clusters found!")
return
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"NAME", "IMAGE", "STATUS"})
table.SetHeader([]string{"NAME", "IMAGE", "STATUS", "WORKERS"})
for _, clusterName := range clusterNames {
cluster, _ := getCluster(clusterName)
clusterData := []string{cluster.name, cluster.image, cluster.status}
for _, cluster := range clusters {
workersRunning := 0
for _, worker := range cluster.workers {
if worker.State == "running" {
workersRunning++
}
}
workerData := fmt.Sprintf("%d/%d", workersRunning, len(cluster.workers))
clusterData := []string{cluster.name, cluster.image, cluster.status, workerData}
if cluster.status == "running" || all {
table.Append(clusterData)
}
@@ -78,47 +91,68 @@ func printClusters(all bool) {
table.Render()
}
// getClusterNames returns a list of cluster names which are folder names in the config directory
func getClusterNames() ([]string, error) {
homeDir, err := homedir.Dir()
// getClusters uses the docker API to get existing clusters and compares that with the list of cluster directories
func getClusters() (map[string]cluster, error) {
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
log.Printf("ERROR: Couldn't get user's home directory")
return nil, err
return nil, fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
configDir := path.Join(homeDir, ".config", "k3d")
files, err := ioutil.ReadDir(configDir)
// Prepare docker label filters
filters := filters.NewArgs()
filters.Add("label", "app=k3d")
filters.Add("label", "component=server")
// get all servers created by k3d
k3dServers, err := docker.ContainerList(ctx, types.ContainerListOptions{
All: true,
Filters: filters,
})
if err != nil {
log.Printf("ERROR: Couldn't list files in [%s]", configDir)
return nil, err
return nil, fmt.Errorf("WARNING: couldn't list server containers\n%+v", err)
}
clusters := []string{}
for _, file := range files {
if file.IsDir() {
clusters = append(clusters, file.Name())
clusters := make(map[string]cluster)
// don't filter for servers but for workers now
filters.Del("label", "component=server")
filters.Add("label", "component=worker")
// for all servers created by k3d, get workers and cluster information
for _, server := range k3dServers {
filters.Add("label", fmt.Sprintf("cluster=%s", server.Labels["cluster"]))
// get workers
workers, err := docker.ContainerList(ctx, types.ContainerListOptions{
All: true,
Filters: filters,
})
if err != nil {
log.Printf("WARNING: couldn't get worker containers for cluster %s\n%+v", server.Labels["cluster"], err)
}
// save cluster information
serverPorts := []string{}
for _, port := range server.Ports {
serverPorts = append(serverPorts, strconv.Itoa(int(port.PublicPort)))
}
clusters[server.Labels["cluster"]] = cluster{
name: server.Labels["cluster"],
image: server.Image,
status: server.State,
serverPorts: serverPorts,
server: server,
workers: workers,
}
// clear label filters before searching for next cluster
filters.Del("label", fmt.Sprintf("cluster=%s", server.Labels["cluster"]))
}
return clusters, nil
}
// getCluster creates a cluster struct with populated information fields
func getCluster(name string) (cluster, error) {
cluster := cluster{
name: name,
image: "UNKNOWN",
status: "UNKNOWN",
}
docker, err := dockerClient.NewEnvClient()
if err != nil {
log.Printf("ERROR: couldn't create docker client -> %+v", err)
return cluster, err
}
containerInfo, err := docker.ContainerInspect(context.Background(), cluster.name)
if err != nil {
log.Printf("WARNING: couldn't get docker info for [%s] -> %+v", cluster.name, err)
} else {
cluster.image = containerInfo.Config.Image
cluster.status = containerInfo.ContainerJSONBase.State.Status
}
return cluster, nil
clusters, err := getClusters()
return clusters[name], err
}
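Everything getClusters does rests on the labels written at creation time (app=k3d, component=server or worker, cluster=<name>). Reduced to its core, the lookup pattern looks like this (hypothetical helper using the same vendored packages):

package run

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// listServers is a hypothetical helper that returns all k3d server containers,
// running or stopped, by matching the labels set in createServer.
func listServers() ([]types.Container, error) {
	ctx := context.Background()
	docker, err := client.NewEnvClient()
	if err != nil {
		return nil, err
	}
	f := filters.NewArgs()
	f.Add("label", "app=k3d")
	f.Add("label", "component=server")
	return docker.ContainerList(ctx, types.ContainerListOptions{
		All:     true, // include stopped servers as well
		Filters: f,
	})
}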

@@ -0,0 +1,175 @@
package run
import (
"context"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"time"
"github.com/docker/go-connections/nat"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
)
func createServer(verbose bool, image string, port string, args []string, env []string, name string, volumes []string) (string, error) {
log.Printf("Creating server using %s...\n", image)
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
reader, err := docker.ImagePull(ctx, image, types.ImagePullOptions{})
if err != nil {
return "", fmt.Errorf("ERROR: couldn't pull image %s\n%+v", image, err)
}
if verbose {
_, err := io.Copy(os.Stdout, reader)
if err != nil {
log.Printf("WARNING: couldn't get docker output\n%+v", err)
}
} else {
_, err := io.Copy(ioutil.Discard, reader)
if err != nil {
log.Printf("WARNING: couldn't get docker output\n%+v", err)
}
}
containerLabels := make(map[string]string)
containerLabels["app"] = "k3d"
containerLabels["component"] = "server"
containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
containerLabels["cluster"] = name
containerName := fmt.Sprintf("k3d-%s-server", name)
containerPort := nat.Port(fmt.Sprintf("%s/tcp", port))
hostConfig := &container.HostConfig{
PortBindings: nat.PortMap{
containerPort: []nat.PortBinding{
{
HostIP: "0.0.0.0",
HostPort: port,
},
},
},
Privileged: true,
}
if len(volumes) > 0 && volumes[0] != "" {
hostConfig.Binds = volumes
}
networkingConfig := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
name: &network.EndpointSettings{
Aliases: []string{containerName},
},
},
}
resp, err := docker.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: append([]string{"server"}, args...),
ExposedPorts: nat.PortSet{
containerPort: struct{}{},
},
Env: env,
Labels: containerLabels,
}, hostConfig, networkingConfig, containerName)
if err != nil {
return "", fmt.Errorf("ERROR: couldn't create container %s\n%+v", containerName, err)
}
if err := docker.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
return "", fmt.Errorf("ERROR: couldn't start container %s\n%+v", containerName, err)
}
return resp.ID, nil
}
func createWorker(verbose bool, image string, args []string, env []string, name string, volumes []string, postfix string, serverPort string) (string, error) {
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
reader, err := docker.ImagePull(ctx, image, types.ImagePullOptions{})
if err != nil {
return "", fmt.Errorf("ERROR: couldn't pull image %s\n%+v", image, err)
}
if verbose {
_, err := io.Copy(os.Stdout, reader)
if err != nil {
log.Printf("WARNING: couldn't get docker output\n%+v", err)
}
}
containerLabels := make(map[string]string)
containerLabels["app"] = "k3d"
containerLabels["component"] = "worker"
containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
containerLabels["cluster"] = name
containerName := fmt.Sprintf("k3d-%s-worker-%s", name, postfix)
env = append(env, fmt.Sprintf("K3S_URL=https://k3d-%s-server:%s", name, serverPort))
hostConfig := &container.HostConfig{
Tmpfs: map[string]string{
"/run": "",
"/var/run": "",
},
Privileged: true,
}
if len(volumes) > 0 && volumes[0] != "" {
hostConfig.Binds = volumes
}
networkingConfig := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
name: &network.EndpointSettings{
Aliases: []string{containerName},
},
},
}
resp, err := docker.ContainerCreate(ctx, &container.Config{
Image: image,
Env: env,
Labels: containerLabels,
}, hostConfig, networkingConfig, containerName)
if err != nil {
return "", fmt.Errorf("ERROR: couldn't create container %s\n%+v", containerName, err)
}
if err := docker.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
return "", fmt.Errorf("ERROR: couldn't start container %s\n%+v", containerName, err)
}
return resp.ID, nil
}
func removeContainer(ID string) error {
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{}); err != nil {
log.Printf("WARNING: couldn't delete container [%s], trying a force remove now.", ID)
if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{Force: true}); err != nil {
return fmt.Errorf("FAILURE: couldn't delete container [%s] -> %+v", ID, err)
}
}
return nil
}

@@ -0,0 +1,59 @@
package run
import (
"context"
"fmt"
"log"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
func createClusterNetwork(clusterName string) (string, error) {
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
resp, err := docker.NetworkCreate(ctx, clusterName, types.NetworkCreate{
Labels: map[string]string{
"app": "k3d",
"cluster": clusterName,
},
})
if err != nil {
return "", fmt.Errorf("ERROR: couldn't create network\n%+v", err)
}
return resp.ID, nil
}
func deleteClusterNetwork(clusterName string) error {
ctx := context.Background()
docker, err := client.NewEnvClient()
if err != nil {
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
}
filters := filters.NewArgs()
filters.Add("label", "app=k3d")
filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
networks, err := docker.NetworkList(ctx, types.NetworkListOptions{
Filters: filters,
})
if err != nil {
return fmt.Errorf("ERROR: couldn't find network for cluster %s\n%+v", clusterName, err)
}
for _, network := range networks {
if err := docker.NetworkRemove(ctx, network.ID); err != nil {
log.Printf("WARNING: couldn't remove network for cluster %s\n%+v", clusterName, err)
continue
}
}
return nil
}

@@ -1,18 +0,0 @@
package run
import (
"log"
"os"
"os/exec"
)
// runCommand accepts the name and args and runs the specified command
func runCommand(verbose bool, name string, args ...string) error {
if verbose {
log.Printf("Running command: %+v", append([]string{name}, args...))
}
cmd := exec.Command(name, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}

@@ -0,0 +1,37 @@
package run
import (
"math/rand"
"strings"
"time"
)
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
var src = rand.NewSource(time.Now().UnixNano())
// GenerateRandomString thanks to https://stackoverflow.com/a/31832326/6450189
func GenerateRandomString(n int) string {
sb := strings.Builder{}
sb.Grow(n)
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
sb.WriteByte(letterBytes[idx])
i--
}
cache >>= letterIdxBits
remain--
}
return sb.String()
}

@@ -6,7 +6,7 @@ require (
github.com/Microsoft/go-winio v0.4.12 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v1.13.1
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.3.3 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/mitchellh/go-homedir v1.1.0

@@ -53,11 +53,11 @@ func main() {
},
cli.StringFlag{
Name: "volume, v",
Usage: "Mount a volume into the cluster node (Docker notation: `source:destination`)",
Usage: "Mount one or more volumes into the cluster node (Docker notation: `source:destination[,source:destination]`)",
},
cli.StringFlag{
Name: "version",
Value: "v0.3.0",
Value: version.GetK3sVersion(),
Usage: "Choose the k3s image version",
},
cli.IntFlag{
@@ -82,6 +82,11 @@ func main() {
Name: "env, e",
Usage: "Pass an additional environment variable (new flag per variable)",
},
cli.IntFlag{
Name: "workers",
Value: 0,
Usage: "Specify how many worker nodes you want to spawn",
},
},
Action: run.CreateCluster,
},
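With the new flag, a multi-node cluster can be requested in a single invocation, e.g. `k3d create --name mycluster --workers 2 --env SOME_VAR=value --volume /my/src:/src` (a hypothetical example; the flag values are placeholders). The worker count defaults to 0, so existing single-node workflows are unchanged.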
@@ -169,6 +174,13 @@ func main() {
},
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "verbose",
Usage: "Enable verbose output",
},
}
// run the whole thing
err := app.Run(os.Args)
if err != nil {

vendor/modules.txt (vendored)

@@ -4,26 +4,26 @@ github.com/Microsoft/go-winio
github.com/docker/distribution/reference
github.com/docker/distribution/digestset
# github.com/docker/docker v1.13.1
github.com/docker/docker/client
github.com/docker/docker/api/types
github.com/docker/docker/api/types/container
github.com/docker/docker/api/types/events
github.com/docker/docker/api/types/filters
github.com/docker/docker/api/types/network
github.com/docker/docker/api/types/reference
github.com/docker/docker/client
github.com/docker/docker/api/types/mount
github.com/docker/docker/api/types/registry
github.com/docker/docker/api/types/swarm
github.com/docker/docker/api/types/time
github.com/docker/docker/api/types/blkiodev
github.com/docker/docker/api/types/strslice
github.com/docker/docker/api/types/versions
github.com/docker/docker/api/types/events
github.com/docker/docker/api/types/reference
github.com/docker/docker/api/types/time
github.com/docker/docker/api/types/volume
github.com/docker/docker/pkg/tlsconfig
github.com/docker/docker/api/types/mount
github.com/docker/docker/api/types/blkiodev
github.com/docker/docker/api/types/strslice
# github.com/docker/go-connections v0.4.0
github.com/docker/go-connections/nat
github.com/docker/go-connections/sockets
github.com/docker/go-connections/tlsconfig
github.com/docker/go-connections/nat
# github.com/docker/go-units v0.3.3
github.com/docker/go-units
# github.com/mattn/go-runewidth v0.0.4

@@ -3,6 +3,9 @@ package version
// Version is the string that contains version
var Version string
// K3sVersion contains the latest version tag of K3s
var K3sVersion string
// GetVersion returns the version for cli, it gets it from "git describe --tags" or returns "dev" when doing simple go build
func GetVersion() string {
if len(Version) == 0 {
@@ -10,3 +13,8 @@ func GetVersion() string {
}
return Version
}
// GetK3sVersion returns the version string for K3s
func GetK3sVersion() string {
return K3sVersion
}
