properly handle --port 1234:4321@loadbalancer:proxy style port mappings which should default to all nodes as upstream

pull/670/head
iwilltry42 3 years ago
parent abc53113c3
commit 1944c06dad
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
  1. CHANGELOG.md (5 changed lines)
  2. Dockerfile (2 changed lines)
  3. pkg/config/transform.go (21 changed lines)
  4. pkg/util/filter.go (41 changed lines)
  5. tests/test_loadbalancer.sh (17 changed lines)

@@ -11,8 +11,9 @@
- identifier = `server`, index = `0`, opt = `proxy`
- `opt` is an extra optional argument used for different purposes depending on the flag
- currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change)
-
- port-mapping now go via the loadbalancer (serverlb) by default
- the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default
- to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag
### Fixes
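
For illustration, a minimal sketch of the `identifier[:index][:opt]` nodefilter syntax on the `--port` flag described above, using the same flag forms exercised by the test script further down (the cluster name `demo` is hypothetical):

```bash
# proxy (default opt): the host port binds on the serverlb container,
# which forwards traffic to the targeted node(s)
k3d cluster create demo -p 8080:80@server:0:proxy

# direct: the host port binds on the targeted node's container itself
k3d cluster create demo -p 1234:4321/tcp@agent:0:direct

# targeting the loadbalancer itself: per this commit, the upstream
# defaults to all servers and agents
k3d cluster create demo -p 4444:5555@loadbalancer:proxy
```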

@@ -5,7 +5,7 @@ COPY . .
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version
FROM docker:20.10-dind as dind
-RUN apk update && apk add bash curl sudo jq git make netcat-openbsd
+RUN apk update && apk add bash curl sudo jq yq git make netcat-openbsd
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \
chmod +x ./kubectl && \
mv ./kubectl /usr/local/bin/kubectl

@@ -175,16 +175,28 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port)
}
-x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
+filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
if err != nil {
return nil, err
}
-for suffix, nodes := range x {
+for suffix, nodes := range filteredNodes {
portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port)
if err != nil {
return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err)
}
+for _, n := range nodes {
+if n.Role == k3d.LoadBalancerRole && n.Name == newCluster.ServerLoadBalancer.Node.Name {
+log.Infoln("loadbalancer in filtered list for port mappings: defaulting to all servers and agents as upstream targets")
+var err error
+nodes, err = util.FilterNodes(newCluster.Nodes, []string{"agents:*", "servers:*"})
+if err != nil {
+return nil, err
+}
+}
+}
if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings
if newCluster.ServerLoadBalancer == nil {
return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled")
@@ -203,6 +215,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
return nil, err
}
}
+} else if suffix != util.NodeFilterMapKeyAll {
+return nil, fmt.Errorf("error adding port mappings: unknown suffix %s", suffix)
}
}
@@ -396,6 +410,9 @@ func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMappi
portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto())
nodenames := []string{}
for _, node := range nodes {
+if node.Role == k3d.LoadBalancerRole {
+return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)")
+}
nodenames = append(nodenames, node.Name)
}

@@ -36,6 +36,16 @@ const (
NodeFilterMapKeyAll = "all"
)
+var (
+rolesByIdentifier = map[string]k3d.Role{
+"server": k3d.ServerRole,
+"servers": k3d.ServerRole,
+"agent": k3d.AgentRole,
+"agents": k3d.AgentRole,
+"loadbalancer": k3d.LoadBalancerRole,
+}
+)
// Regexp pattern to match node filters
var NodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffixSpec>:(?P<suffix>[[:alpha:]]+))?$`)
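
A quick way to see which filter strings `NodeFilterRegexp` accepts, using a POSIX-ERE transliteration of the pattern above (`\d` replaced with `[0-9]`, named groups dropped; this shell check is illustrative only, not part of the commit):

```bash
pattern='^(server|servers|agent|agents|loadbalancer|all)(:(([0-9]+,?)+|[0-9]*-[0-9]*|\*))?(:[[:alpha:]]+)?$'
for f in "server:0" "servers:*" "agents:1-3" "agent:0:direct" "loadbalancer:proxy" "node:0"; do
    if printf '%s\n' "$f" | grep -Eq "$pattern"; then
        echo "match:    $f"
    else
        echo "no match: $f"   # e.g. 'node:0' is rejected: unknown group identifier
    fi
done
```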
@@ -74,7 +84,7 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string]
return nil, err
}
log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf)
log.Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf)
result[suffix] = filteredNodes
}
@@ -132,16 +142,21 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
// Choose the group of nodes to operate on
groupNodes := []*k3d.Node{}
-if submatches["group"] == string(k3d.ServerRole) {
-groupNodes = serverNodes
-} else if submatches["group"] == string(k3d.AgentRole) {
-groupNodes = agentNodes
-} else if submatches["group"] == string(k3d.LoadBalancerRole) {
-if serverlb == nil {
-return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter)
+if role, ok := rolesByIdentifier[submatches["group"]]; ok {
+switch role {
+case k3d.ServerRole:
+groupNodes = serverNodes
+break
+case k3d.AgentRole:
+groupNodes = agentNodes
+break
+case k3d.LoadBalancerRole:
+if serverlb == nil {
+return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter)
+}
+filteredNodes = append(filteredNodes, serverlb)
+return filteredNodes, nil // early exit if filtered group is the loadbalancer
+}
-filteredNodes = append(filteredNodes, serverlb)
-return filteredNodes, nil // early exit if filtered group is the loadbalancer
-}
/* Option 1) subset defined by list */
@@ -166,10 +181,10 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
} else if submatches["subsetRange"] != "" {
/*
-* subset specified by a range 'START:END', where each side is optional
+* subset specified by a range 'START-END', where each side is optional
*/
-split := strings.Split(submatches["subsetRange"], ":")
+split := strings.Split(submatches["subsetRange"], "-")
if len(split) != 2 {
return nil, fmt.Errorf("Failed to parse subset range in '%s'", filter)
}
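
With the separator fixed to match the regexp's `subsetRange` group, open and closed ranges should now parse as documented. A hypothetical usage example (assuming the range is inclusive and that the `--volume` flag accepts nodefilters, as in k3d's documented flag syntax):

```bash
# mount a host directory only on agents 0 and 1 of a 3-agent cluster
k3d cluster create rangedemo --agents 3 -v /tmp/shared:/shared@agent:0-1
```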
@@ -226,6 +241,8 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
}
log.Tracef("Filtered %d nodes (filter: %s)", len(filteredNodes), filters)
return filteredNodes, nil
}

@@ -25,7 +25,12 @@ function check_container_port() {
clustername="lbtest"
info "Creating cluster $clustername..."
-$EXE cluster create $clustername --timeout 360s --agents 1 -p 2222:3333@server:0 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername"
+$EXE cluster create $clustername --timeout 360s --agents 1 \
+-p 2222:3333@server:0 \
+-p 8080:80@server:0:proxy \
+-p 1234:4321/tcp@agent:0:direct \
+-p 4444:5555@loadbalancer:0:proxy \
+|| failed "could not create cluster $clustername"
info "Checking we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"
@@ -45,8 +50,16 @@ info "> Checking implicit proxy port mapping of port 3333 -> loadbalancer -> ser
check_container_port k3d-$clustername-server-0 "3333/tcp" && failed "3333/tcp on server-0 but should be on serverlb"
check_container_port k3d-$clustername-serverlb "3333/tcp" || failed "3333/tcp not on serverlb"
info "> Checking implicit proxy port mapping of port 5555 -> loadbalancer -> server-0 & agent-0"
check_container_port k3d-$clustername-server-0 "5555/tcp" && failed "5555/tcp on server-0 but should be on serverlb"
check_container_port k3d-$clustername-agent-0 "5555/tcp" && failed "5555/tcp on agent-0 but should be on serverlb"
check_container_port k3d-$clustername-serverlb "5555/tcp" || failed "5555/tcp not on serverlb"
info "Checking Loadbalancer Config..."
-$EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0"
+$EXE debug loadbalancer get-config $clustername | yq read - 'ports."80.tcp"' | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0"
+$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0"
+$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0"
info "Deleting clusters..."
$EXE cluster delete $clustername || failed "could not delete the cluster $clustername"
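
The yq-based assertions above imply the loadbalancer config is served as YAML keyed by `<port>.<proto>`. A hedged sketch of inspecting it by hand (the exact output shape is assumed from those assertions, not taken verbatim from this commit):

```bash
k3d debug loadbalancer get-config lbtest
# expected to contain something like:
# ports:
#   80.tcp:
#     - k3d-lbtest-server-0
#   5555.tcp:
#     - k3d-lbtest-server-0
#     - k3d-lbtest-agent-0
```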
