MasterLoadBalancer: add nodefilter & extra treatment

- masterlb is now an extra part of the cluster spec
- ports can now be attached to masterlb by using the 'loadbalancer'
nodefilter
- all ports exposed on the masterlb will be proxied to all master nodes
(on the same port)
pull/212/head
iwilltry42 4 years ago
parent 4bfac51c7c
commit 9c9f49646e
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
  1. 11
      cmd/create/createCluster.go
  2. 8
      cmd/util/filter.go
  3. 17
      pkg/cluster/cluster.go
  4. 21
      pkg/types/types.go
  5. 2
      proxy/conf.d/nginx.toml
  6. 5
      proxy/nginx-proxy
  7. 17
      proxy/templates/nginx.tmpl

@ -298,7 +298,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
}
log.Debugln(portFilterMap)
log.Debugf("PortFilterMap: %+v", portFilterMap)
/********************
* *
@ -317,6 +317,13 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
// generate list of nodes
cluster.Nodes = []*k3d.Node{}
// MasterLoadBalancer
if !createClusterOpts.DisableLoadBalancer {
cluster.MasterLoadBalancer = &k3d.Node{
Role: k3d.LoadBalancerRole,
}
}
/****************
* Master Nodes *
****************/
@ -372,7 +379,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
if len(filters) == 0 && (masterCount+workerCount) > 1 {
log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node.", portmap)
}
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
nodes, err := cliutil.FilterNodes(append(cluster.Nodes, cluster.MasterLoadBalancer), filters)
if err != nil {
log.Fatalln(err)
}

@ -34,7 +34,7 @@ import (
)
// Regexp pattern to match node filters
var filterRegexp = regexp.MustCompile(`^(?P<group>master|worker|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
var filterRegexp = regexp.MustCompile(`^(?P<group>master|worker|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
func SplitFiltersFromFlag(flag string) (string, []string, error) {
@ -74,11 +74,14 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
// map roles to subsets
masterNodes := []*k3d.Node{}
workerNodes := []*k3d.Node{}
var masterlb *k3d.Node
for _, node := range nodes {
if node.Role == k3d.MasterRole {
masterNodes = append(masterNodes, node)
} else if node.Role == k3d.WorkerRole {
workerNodes = append(workerNodes, node)
} else if node.Role == k3d.LoadBalancerRole {
masterlb = node
}
}
@ -111,6 +114,9 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
groupNodes = masterNodes
} else if submatches["group"] == string(k3d.WorkerRole) {
groupNodes = workerNodes
} else if submatches["group"] == string(k3d.LoadBalancerRole) {
filteredNodes = append(filteredNodes, masterlb)
return filteredNodes, nil // early exit if filtered group is the loadbalancer
}
/* Option 1) subset defined by list */

@ -240,8 +240,10 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
suffix = workerCount
workerCount++
}
if err := nodeSetup(node, suffix); err != nil {
return err
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
if err := nodeSetup(node, suffix); err != nil {
return err
}
}
// asynchronously wait for this master node to be ready (by checking the logs for a specific log message)
@ -281,14 +283,21 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
}
}
// generate comma-separated list of extra ports to forward
ports := k3d.DefaultAPIPort
for _, portString := range cluster.MasterLoadBalancer.Ports {
split := strings.Split(portString, ":")
ports += "," + split[len(split)-1]
}
// Create LB as a modified node with loadbalancerRole
lbNode := &k3d.Node{
Name: fmt.Sprintf("%s-%s-masterlb", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: k3d.DefaultLBImage,
Ports: []string{fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)},
Ports: append(cluster.MasterLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
Env: []string{
fmt.Sprintf("SERVERS=%s", servers),
fmt.Sprintf("PORT=%s", k3d.DefaultAPIPort),
fmt.Sprintf("PORTS=%s", ports),
},
Role: k3d.LoadBalancerRole,
Labels: k3d.DefaultObjectLabels, // TODO: createLoadBalancer: add more expressive labels

@ -39,7 +39,7 @@ const DefaultClusterNameMaxLength = 32
const DefaultK3sImageRepo = "docker.io/rancher/k3s"
// DefaultLBImage defines the default cluster load balancer image
const DefaultLBImage = "docker.io/iwilltry42/k3d-proxy:v0.0.1"
const DefaultLBImage = "docker.io/iwilltry42/k3d-proxy:v0.0.2"
// DefaultObjectNamePrefix defines the name prefix for every object created by k3d
const DefaultObjectNamePrefix = "k3d"
@ -114,15 +114,16 @@ type ClusterNetwork struct {
// Cluster describes a k3d cluster
type Cluster struct {
Name string `yaml:"name" json:"name,omitempty"`
Network ClusterNetwork `yaml:"network" json:"network,omitempty"`
Secret string `yaml:"cluster_secret" json:"clusterSecret,omitempty"`
Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
InitNode *Node // init master node
ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
CreateClusterOpts *CreateClusterOpts `yaml:"options" json:"options,omitempty"`
ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
Name string `yaml:"name" json:"name,omitempty"`
Network ClusterNetwork `yaml:"network" json:"network,omitempty"`
Secret string `yaml:"cluster_secret" json:"clusterSecret,omitempty"`
Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
InitNode *Node // init master node
ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
CreateClusterOpts *CreateClusterOpts `yaml:"options" json:"options,omitempty"`
ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
MasterLoadBalancer *Node `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
}
// Node describes a k3d node

@ -3,5 +3,5 @@ src = "nginx.tmpl"
dest = "/etc/nginx/nginx.conf"
keys = [
"SERVERS",
"PORT",
"PORTS",
]

@ -3,5 +3,10 @@
# Run confd
confd -onetime -backend env
# Output Configuration
echo "===== Initial nginx configuration ====="
cat /etc/nginx/nginx.conf
echo "======================================="
# Start nginx
nginx -g 'daemon off;'

@ -1,3 +1,6 @@
{{- $servers := split (getenv "SERVERS") "," -}}
{{- $ports := split (getenv "PORTS") "," -}}
error_log stderr notice;
worker_processes auto;
@ -8,16 +11,18 @@ events {
}
stream {
upstream kube_apiserver {
{{ $servers := split (getenv "SERVERS") "," }}{{range $servers}}
server {{.}}:{{getenv "PORT"}} max_fails=1 fail_timeout=10s slow_start=10s;
{{end}}
{{- range $port := $ports }}
upstream master_nodes_{{ $port }} {
{{- range $server := $servers }}
server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s;
{{- end }}
}
server {
listen {{getenv "PORT"}};
proxy_pass kube_apiserver;
listen {{ $port }};
proxy_pass master_nodes_{{ $port }};
proxy_timeout 30;
proxy_connect_timeout 2s;
}
{{- end }}
}
Loading…
Cancel
Save