add some more port-mapping work

pull/670/head
iwilltry42 3 years ago
parent 2092ceaaa9
commit 8e29ad4f1a
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
  1. 2
      cmd/cluster/clusterCreate.go
  2. 14
      pkg/client/node.go
  3. 94
      pkg/types/loadbalancer.go
  4. 39
      pkg/types/node.go
  5. 42
      pkg/types/types.go
  6. 23
      pkg/util/filter.go

@ -301,7 +301,7 @@ func NewCmdClusterCreate() *cobra.Command {
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
_ = ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume"))
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
_ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port"))
cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`")

@ -386,19 +386,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
/* global node configuration (applies for any node role) */
// ### Labels ###
labels := make(map[string]string)
for k, v := range k3d.DefaultRuntimeLabels {
labels[k] = v
}
for k, v := range k3d.DefaultRuntimeLabelsVar {
labels[k] = v
}
for k, v := range node.RuntimeLabels {
labels[k] = v
}
node.RuntimeLabels = labels
// second most important: the node role label
node.RuntimeLabels[k3d.LabelRole] = string(node.Role)
node.FillRuntimeLabels()
for k, v := range node.K3sNodeLabels {
node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v))

@ -0,0 +1,94 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
/* DESCRIPTION
* The Loadbalancer is a customized NGINX container running side-by-side with the cluster, NOT INSIDE IT.
* It is used to do plain proxying of tcp/udp ports to the k3d node containers.
* One advantage of this approach is, that we can add new ports while the cluster is still running by re-creating
* the loadbalancer and adding the new port config in the NGINX config. As the loadbalancer doesn't hold any state
* (apart from the config file), it can easily be re-created in just a few seconds.
*/
/*
* Loadbalancer Definition
*/
// Loadbalancer wraps the proxy container that runs alongside the cluster:
// the plain Node that backs it plus the NGINX port-forwarding configuration
// that gets rendered into the container.
type Loadbalancer struct {
Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node
Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration
}
// NewLoadbalancer returns a Loadbalancer whose underlying node is
// pre-seeded with the loadbalancer role and the default proxy image;
// the Config field is left at its zero value.
func NewLoadbalancer() *Loadbalancer {
	lb := &Loadbalancer{}
	lb.Node.Role = LoadBalancerRole
	lb.Node.Image = GetLoadbalancerImage()
	return lb
}
/*
 * Loadbalancer Configuration
 */

/* LoadbalancerConfig defines the coarse file structure used to configure the
 * k3d-proxy container (rendered to the confd values file).
 * Example:
 *   ports:
 *     1234.tcp:
 *       - k3d-k3s-default-server-0
 *       - k3d-k3s-default-server-1
 *     4321.udp:
 *       - k3d-k3s-default-agent-0
 *       - k3d-k3s-default-agent-1
 */
type LoadbalancerConfig struct {
// Ports maps "PORT.PROTOCOL" keys to the node container names traffic is proxied to
Ports map[string][]string `yaml:"ports"`
// Settings holds tunables for the proxy process itself
Settings LoadBalancerSettings `yaml:"settings"`
}

// LoadBalancerSettings are NGINX process tunables for the proxy container.
type LoadBalancerSettings struct {
WorkerProcesses int `yaml:"workerProcesses"`
}

const (
// DefaultLoadbalancerConfigPath is where the config is mounted inside the proxy container (confd values file)
DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml"
// DefaultLoadbalancerWorkerProcesses is the default NGINX worker_processes value
DefaultLoadbalancerWorkerProcesses = 1024
)

// LoadbalancerCreateOpts are options for creating the loadbalancer node.
type LoadbalancerCreateOpts struct {
Labels map[string]string
}
/*
* Helper Functions
*/
// HasLoadBalancer reports whether the cluster contains at least one node
// with the loadbalancer role.
func (c *Cluster) HasLoadBalancer() bool {
	found := false
	for i := range c.Nodes {
		if c.Nodes[i].Role == LoadBalancerRole {
			found = true
			break
		}
	}
	return found
}

@ -0,0 +1,39 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
// FillRuntimeLabels replaces node.RuntimeLabels with the merge of the
// package-wide default runtime labels and the node's own labels.
// Later sources win on key collisions, so the precedence (lowest to highest)
// is: DefaultRuntimeLabels, DefaultRuntimeLabelsVar, node.RuntimeLabels.
// Finally the role label is stamped on top unconditionally.
func (node *Node) FillRuntimeLabels() {
	merged := make(map[string]string, len(DefaultRuntimeLabels)+len(DefaultRuntimeLabelsVar)+len(node.RuntimeLabels))
	for _, source := range []map[string]string{DefaultRuntimeLabels, DefaultRuntimeLabelsVar, node.RuntimeLabels} {
		for key, value := range source {
			merged[key] = value
		}
	}
	node.RuntimeLabels = merged
	// second most important: the node role label always overrides
	node.RuntimeLabels[LabelRole] = string(node.Role)
}

@ -297,16 +297,6 @@ func (c *Cluster) AgentCountRunning() (int, int) {
return agentCount, agentsRunning
}
// HasLoadBalancer returns true if cluster has a loadbalancer node.
// NOTE(review): this copy lives in pkg/types/types.go and is the removal side
// of this change — the definition is relocated to pkg/types/loadbalancer.go.
func (c *Cluster) HasLoadBalancer() bool {
for _, node := range c.Nodes {
if node.Role == LoadBalancerRole {
return true
}
}
return false
}
type NodeIP struct {
IP netaddr.IP
Static bool
@ -413,35 +403,3 @@ type RegistryExternal struct {
Host string `yaml:"host" json:"host"`
Port string `yaml:"port" json:"port"`
}
/*
 * Loadbalancer
 * NOTE(review): this whole section is the removal side of this change —
 * these declarations are relocated to pkg/types/loadbalancer.go.
 */

/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy
 * Example:
 *   ports:
 *     1234.tcp:
 *       - k3d-k3s-default-server-0
 *       - k3d-k3s-default-server-1
 *     4321.udp:
 *       - k3d-k3s-default-agent-0
 *       - k3d-k3s-default-agent-1
 */
type LoadbalancerConfig struct {
// Ports maps "PORT.PROTOCOL" keys to target node container names
Ports map[string][]string `yaml:"ports"`
Settings LoadBalancerSettings `yaml:"settings"`
}

// LoadBalancerSettings are NGINX process tunables for the proxy container.
type LoadBalancerSettings struct {
WorkerProcesses int `yaml:"workerProcesses"`
}

const (
// DefaultLoadbalancerConfigPath is the confd values file path inside the proxy container
DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml"
// DefaultLoadbalancerWorkerProcesses is the default NGINX worker_processes value
DefaultLoadbalancerWorkerProcesses = 1024
)

// LoadbalancerCreateOpts are options for creating the loadbalancer node.
type LoadbalancerCreateOpts struct {
Labels map[string]string
}

@ -31,15 +31,26 @@ import (
log "github.com/sirupsen/logrus"
)
// NodeFilterSuffix is the trailing ":suffix" part of a node filter
// (the `suffix` capture group of NodeFilterRegexp).
type NodeFilterSuffix string

const (
// NodeFilterSuffixNone denotes a filter without any suffix
NodeFilterSuffixNone NodeFilterSuffix = "none"
// NodeFilterMapKeyAll is the FilterNodes result-map key holding the unfiltered node list
NodeFilterMapKeyAll = "all"
)

// Regexp pattern to match node filters
// (old syntax: GROUP[SUBSET], e.g. `server[0,1]` — being replaced in this change)
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
// NodeFilterRegexp matches the new colon-separated filter syntax
// GROUP[:SUBSET][:SUFFIX], e.g. `servers:0-2`, `agent:1,2`, `all:*:direct`;
// subsets are a comma list, a `lo-hi` range, or the `*` wildcard.
var NodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffix>:[[:alpha:]]+)?$`)
// FilterNodes takes a string filter to return a filtered list of nodes
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) {
result := map[string][]*k3d.Node{
NodeFilterMapKeyAll: nodes,
}
if len(filters) == 0 || len(filters[0]) == 0 {
log.Warnln("No node filter specified")
return nodes, nil
return result, nil
}
// map roles to subsets
@ -64,21 +75,21 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
for _, filter := range filters {
// match regex with capturing groups
match := filterRegexp.FindStringSubmatch(filter)
match := NodeFilterRegexp.FindStringSubmatch(filter)
if len(match) == 0 {
return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", filter)
}
// map capturing group names to submatches
submatches := MapSubexpNames(filterRegexp.SubexpNames(), match)
submatches := MapSubexpNames(NodeFilterRegexp.SubexpNames(), match)
// if one of the filters is 'all', we only return this and drop all others
if submatches["group"] == "all" {
if len(filters) > 1 {
log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
}
return nodes, nil
return result, nil
}
// Choose the group of nodes to operate on

Loading…
Cancel
Save