diff --git a/Makefile b/Makefile index a594b511..a2136c40 100644 --- a/Makefile +++ b/Makefile @@ -80,7 +80,7 @@ fmt: .PHONY: test # we say code is not worth testing unless it's formatted test: fmt codegen - go test -v -coverpkg=./sentry,./cloud/linode/client,./cloud/linode/firewall,./cloud/linode -coverprofile ./coverage.out -cover ./sentry/... ./cloud/... $(TEST_ARGS) + go test -v -coverpkg=./sentry,./cloud/linode/client,./cloud/linode/firewall,./cloud/linode,./cloud/nodeipam,./cloud/nodeipam/ipam -coverprofile ./coverage.out -cover ./sentry/... ./cloud/... $(TEST_ARGS) .PHONY: build-linux build-linux: codegen diff --git a/cloud/linode/client/client.go b/cloud/linode/client/client.go index 4938a8fb..ca2efe2d 100644 --- a/cloud/linode/client/client.go +++ b/cloud/linode/client/client.go @@ -26,6 +26,7 @@ type Client interface { GetInstance(context.Context, int) (*linodego.Instance, error) ListInstances(context.Context, *linodego.ListOptions) ([]linodego.Instance, error) CreateInstance(ctx context.Context, opts linodego.InstanceCreateOptions) (*linodego.Instance, error) + ListInstanceConfigs(ctx context.Context, linodeID int, opts *linodego.ListOptions) ([]linodego.InstanceConfig, error) GetInstanceIPAddresses(context.Context, int) (*linodego.InstanceIPAddressResponse, error) AddInstanceIPAddress(ctx context.Context, linodeID int, public bool) (*linodego.InstanceIP, error) diff --git a/cloud/linode/client/client_with_metrics.go b/cloud/linode/client/client_with_metrics.go index 1034fbd7..e20e746c 100644 --- a/cloud/linode/client/client_with_metrics.go +++ b/cloud/linode/client/client_with_metrics.go @@ -280,6 +280,19 @@ func (_d ClientWithPrometheus) ListFirewallDevices(ctx context.Context, firewall return _d.base.ListFirewallDevices(ctx, firewallID, opts) } +// ListInstanceConfigs implements Client +func (_d ClientWithPrometheus) ListInstanceConfigs(ctx context.Context, linodeID int, opts *linodego.ListOptions) (ia1 []linodego.InstanceConfig, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListInstanceConfigs", result).Inc() + }() + return _d.base.ListInstanceConfigs(ctx, linodeID, opts) +} + // ListInstances implements Client func (_d ClientWithPrometheus) ListInstances(ctx context.Context, lp1 *linodego.ListOptions) (ia1 []linodego.Instance, err error) { defer func() { diff --git a/cloud/linode/client/mocks/mock_client.go b/cloud/linode/client/mocks/mock_client.go index 038379d5..d672ab8e 100644 --- a/cloud/linode/client/mocks/mock_client.go +++ b/cloud/linode/client/mocks/mock_client.go @@ -315,6 +315,21 @@ func (mr *MockClientMockRecorder) ListFirewallDevices(arg0, arg1, arg2 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFirewallDevices", reflect.TypeOf((*MockClient)(nil).ListFirewallDevices), arg0, arg1, arg2) } +// ListInstanceConfigs mocks base method. +func (m *MockClient) ListInstanceConfigs(arg0 context.Context, arg1 int, arg2 *linodego.ListOptions) ([]linodego.InstanceConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListInstanceConfigs", arg0, arg1, arg2) + ret0, _ := ret[0].([]linodego.InstanceConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListInstanceConfigs indicates an expected call of ListInstanceConfigs. 
+func (mr *MockClientMockRecorder) ListInstanceConfigs(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceConfigs", reflect.TypeOf((*MockClient)(nil).ListInstanceConfigs), arg0, arg1, arg2) +} + // ListInstances mocks base method. func (m *MockClient) ListInstances(arg0 context.Context, arg1 *linodego.ListOptions) ([]linodego.Instance, error) { m.ctrl.T.Helper() diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index 04bd6958..8b1d67a4 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -56,6 +56,7 @@ var Options struct { GlobalStopChannel chan<- struct{} EnableIPv6ForLoadBalancers bool AllocateNodeCIDRs bool + DisableIPv6NodeCIDRAllocation bool ClusterCIDRIPv4 string NodeCIDRMaskSizeIPv4 int NodeCIDRMaskSizeIPv6 int diff --git a/cloud/linode/cloud_test.go b/cloud/linode/cloud_test.go index a46aa218..b335b939 100644 --- a/cloud/linode/cloud_test.go +++ b/cloud/linode/cloud_test.go @@ -20,6 +20,7 @@ func TestNewCloudRouteControllerDisabled(t *testing.T) { t.Setenv("LINODE_API_TOKEN", "dummyapitoken") t.Setenv("LINODE_REGION", "us-east") t.Setenv("LINODE_REQUEST_TIMEOUT_SECONDS", "10") + t.Setenv("LINODE_URL", "https://api.linode.com/v4") Options.NodeBalancerPrefix = "ccm" t.Run("should not fail if vpc is empty and routecontroller is disabled", func(t *testing.T) { @@ -45,6 +46,7 @@ func TestNewCloud(t *testing.T) { t.Setenv("LINODE_REGION", "us-east") t.Setenv("LINODE_REQUEST_TIMEOUT_SECONDS", "10") t.Setenv("LINODE_ROUTES_CACHE_TTL_SECONDS", "60") + t.Setenv("LINODE_URL", "https://api.linode.com/v4") Options.LinodeGoDebug = true Options.NodeBalancerPrefix = "ccm" diff --git a/cloud/linode/nodeipamcontroller.go b/cloud/linode/nodeipamcontroller.go index 678574c5..1957e8c2 100644 --- a/cloud/linode/nodeipamcontroller.go +++ b/cloud/linode/nodeipamcontroller.go @@ -27,24 +27,24 @@ import ( "k8s.io/apimachinery/pkg/util/wait" v1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" - cloudprovider "k8s.io/cloud-provider" - nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam" - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" netutils "k8s.io/utils/net" + + nodeipamcontroller "github.com/linode/linode-cloud-controller-manager/cloud/nodeipam" + "github.com/linode/linode-cloud-controller-manager/cloud/nodeipam/ipam" ) const ( - maxAllowedNodeCIDRs = 2 + maxAllowedNodeCIDRsIPv4 = 1 ) var ( // defaultNodeMaskCIDRIPv4 is default mask size for IPv4 node cidr defaultNodeMaskCIDRIPv4 = 24 // defaultNodeMaskCIDRIPv6 is default mask size for IPv6 node cidr - defaultNodeMaskCIDRIPv6 = 64 + defaultNodeMaskCIDRIPv6 = 112 ) -func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interface, nodeInformer v1.NodeInformer, kubeclient kubernetes.Interface) error { +func startNodeIpamController(stopCh <-chan struct{}, cloud *linodeCloud, nodeInformer v1.NodeInformer, kubeclient kubernetes.Interface) error { var serviceCIDR *net.IPNet var secondaryServiceCIDR *net.IPNet @@ -54,51 +54,24 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa } // failure: bad cidrs in config - clusterCIDRs, dualStack, err := processCIDRs(Options.ClusterCIDRIPv4) + clusterCIDRs, err := processCIDRs(Options.ClusterCIDRIPv4) if err != nil { return fmt.Errorf("processCIDRs failed: %w", err) } - // failure: more than one cidr but they are not configured as dual stack - if len(clusterCIDRs) > 1 && !dualStack { - return fmt.Errorf("len of ClusterCIDRs==%v and 
they are not configured as dual stack (at least one from each IPFamily", len(clusterCIDRs)) - } - - // failure: more than cidrs is not allowed even with dual stack - if len(clusterCIDRs) > maxAllowedNodeCIDRs { - return fmt.Errorf("len of clusters is:%v > more than max allowed of %d", len(clusterCIDRs), maxAllowedNodeCIDRs) - } - - /* TODO: uncomment and fix if we want to support service cidr overlap with nodecidr - // service cidr processing - if len(strings.TrimSpace(nodeIPAMConfig.ServiceCIDR)) != 0 { - _, serviceCIDR, err = netutils.ParseCIDRSloppy(nodeIPAMConfig.ServiceCIDR) - if err != nil { - klog.ErrorS(err, "Unsuccessful parsing of service CIDR", "CIDR", nodeIPAMConfig.ServiceCIDR) - } + if len(clusterCIDRs) == 0 { + return fmt.Errorf("no clusterCIDR specified. Must specify --cluster-cidr if --allocate-node-cidrs is set") } - if len(strings.TrimSpace(nodeIPAMConfig.SecondaryServiceCIDR)) != 0 { - _, secondaryServiceCIDR, err = netutils.ParseCIDRSloppy(nodeIPAMConfig.SecondaryServiceCIDR) - if err != nil { - klog.ErrorS(err, "Unsuccessful parsing of service CIDR", "CIDR", nodeIPAMConfig.SecondaryServiceCIDR) - } + if len(clusterCIDRs) > maxAllowedNodeCIDRsIPv4 { + return fmt.Errorf("too many clusterCIDRs specified for ipv4, max allowed is %d", maxAllowedNodeCIDRsIPv4) } - // the following checks are triggered if both serviceCIDR and secondaryServiceCIDR are provided - if serviceCIDR != nil && secondaryServiceCIDR != nil { - // should be dual stack (from different IPFamilies) - dualstackServiceCIDR, err := netutils.IsDualStackCIDRs([]*net.IPNet{serviceCIDR, secondaryServiceCIDR}) - if err != nil { - return nil, false, fmt.Errorf("failed to perform dualstack check on serviceCIDR and secondaryServiceCIDR error:%v", err) - } - if !dualstackServiceCIDR { - return nil, false, fmt.Errorf("serviceCIDR and secondaryServiceCIDR are not dualstack (from different IPfamiles)") - } + if clusterCIDRs[0].IP.To4() == nil { + return fmt.Errorf("clusterCIDR %s is not ipv4", clusterCIDRs[0].String()) } - */ - nodeCIDRMaskSizes := setNodeCIDRMaskSizes(clusterCIDRs) + nodeCIDRMaskSizes := setNodeCIDRMaskSizes() ctx := wait.ContextForChannel(stopCh) @@ -106,12 +79,14 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa ctx, nodeInformer, cloud, + cloud.client, kubeclient, clusterCIDRs, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, - ipam.RangeAllocatorType, + ipam.CloudAllocatorType, + Options.DisableIPv6NodeCIDRAllocation, ) if err != nil { return err @@ -121,47 +96,25 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa return nil } -// processCIDRs is a helper function that works on a comma separated cidrs and returns -// a list of typed cidrs -// a flag if cidrs represents a dual stack -// error if failed to parse any of the cidrs -func processCIDRs(cidrsList string) ([]*net.IPNet, bool, error) { +// processCIDR is a helper function that works on cidr and returns a list of typed cidrs +// error if failed to parse the cidr +func processCIDRs(cidrsList string) ([]*net.IPNet, error) { cidrsSplit := strings.Split(strings.TrimSpace(cidrsList), ",") cidrs, err := netutils.ParseCIDRs(cidrsSplit) if err != nil { - return nil, false, err + return nil, err } - // if cidrs has an error then the previous call will fail - // safe to ignore error checking on next call - dualstack, err := netutils.IsDualStackCIDRs(cidrs) - if err != nil { - return nil, false, fmt.Errorf("failed to perform dualstack check on cidrs: %w", err) - } - - return cidrs, 
dualstack, nil + return cidrs, nil } -func setNodeCIDRMaskSizes(clusterCIDRs []*net.IPNet) []int { - sortedSizes := func(maskSizeIPv4, maskSizeIPv6 int) []int { - nodeMaskCIDRs := make([]int, len(clusterCIDRs)) - - for idx, clusterCIDR := range clusterCIDRs { - if netutils.IsIPv6CIDR(clusterCIDR) { - nodeMaskCIDRs[idx] = maskSizeIPv6 - } else { - nodeMaskCIDRs[idx] = maskSizeIPv4 - } - } - return nodeMaskCIDRs - } - +func setNodeCIDRMaskSizes() []int { if Options.NodeCIDRMaskSizeIPv4 != 0 { defaultNodeMaskCIDRIPv4 = Options.NodeCIDRMaskSizeIPv4 } if Options.NodeCIDRMaskSizeIPv6 != 0 { defaultNodeMaskCIDRIPv6 = Options.NodeCIDRMaskSizeIPv6 } - return sortedSizes(defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6) + return []int{defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6} } diff --git a/cloud/linode/nodeipamcontroller_test.go b/cloud/linode/nodeipamcontroller_test.go index 6c15e1a5..d81b7a50 100644 --- a/cloud/linode/nodeipamcontroller_test.go +++ b/cloud/linode/nodeipamcontroller_test.go @@ -28,70 +28,26 @@ import ( v1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" - cloudprovider "k8s.io/cloud-provider" ) func Test_setNodeCIDRMaskSizes(t *testing.T) { type args struct { - clusterCIDRs []*net.IPNet - ipv4NetMask int - ipv6NetMask int + ipv4NetMask int + ipv6NetMask int } - _, ipv4Net, _ := net.ParseCIDR("10.192.0.0/10") - _, ipv6Net, _ := net.ParseCIDR("fd00::/56") tests := []struct { name string args args want []int }{ { - name: "empty cluster cidrs", - args: args{ - clusterCIDRs: []*net.IPNet{}, - }, - want: []int{}, - }, - { - name: "single cidr", - args: args{ - clusterCIDRs: []*net.IPNet{ - { - IP: ipv4Net.IP, - Mask: ipv4Net.Mask, - }, - }, - }, - want: []int{defaultNodeMaskCIDRIPv4}, - }, - { - name: "two cidrs", - args: args{ - clusterCIDRs: []*net.IPNet{ - { - IP: ipv4Net.IP, - Mask: ipv4Net.Mask, - }, - { - IP: ipv6Net.IP, - Mask: ipv6Net.Mask, - }, - }, - }, + name: "default cidr mask sizes", + args: args{}, want: []int{defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6}, }, { name: "two cidrs with custom mask sizes", args: args{ - clusterCIDRs: []*net.IPNet{ - { - IP: ipv4Net.IP, - Mask: ipv4Net.Mask, - }, - { - IP: ipv6Net.IP, - Mask: ipv6Net.Mask, - }, - }, ipv4NetMask: 25, ipv6NetMask: 80, }, @@ -112,7 +68,7 @@ func Test_setNodeCIDRMaskSizes(t *testing.T) { if tt.args.ipv6NetMask != 0 { Options.NodeCIDRMaskSizeIPv6 = tt.args.ipv6NetMask } - got := setNodeCIDRMaskSizes(tt.args.clusterCIDRs) + got := setNodeCIDRMaskSizes() if !reflect.DeepEqual(got, tt.want) { t.Errorf("setNodeCIDRMaskSizes() = %v, want %v", got, tt.want) } @@ -125,22 +81,19 @@ func Test_processCIDRs(t *testing.T) { cidrsList string } _, ipv4Net, _ := net.ParseCIDR("10.192.0.0/10") - _, ipv6Net, _ := net.ParseCIDR("fd00::/56") tests := []struct { - name string - args args - want []*net.IPNet - ipv6Enabled bool - wantErr bool + name string + args args + want []*net.IPNet + wantErr bool }{ { name: "empty cidr list", args: args{ cidrsList: "", }, - want: nil, - ipv6Enabled: false, - wantErr: true, + want: nil, + wantErr: true, }, { name: "valid ipv4 cidr", @@ -153,31 +106,12 @@ func Test_processCIDRs(t *testing.T) { Mask: ipv4Net.Mask, }, }, - ipv6Enabled: false, - wantErr: false, - }, - { - name: "valid ipv4 and ipv6 cidrs", - args: args{ - cidrsList: "10.192.0.0/10,fd00::/56", - }, - want: []*net.IPNet{ - { - IP: ipv4Net.IP, - Mask: ipv4Net.Mask, - }, - { - IP: ipv6Net.IP, - Mask: ipv6Net.Mask, - }, - }, - ipv6Enabled: true, - wantErr: false, + wantErr: 
false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, got1, err := processCIDRs(tt.args.cidrsList) + got, err := processCIDRs(tt.args.cidrsList) if (err != nil) != tt.wantErr { t.Errorf("processCIDRs() error = %v, wantErr %v", err, tt.wantErr) return @@ -185,9 +119,6 @@ func Test_processCIDRs(t *testing.T) { if !reflect.DeepEqual(got, tt.want) { t.Errorf("processCIDRs() got = %v, want %v", got, tt.want) } - if got1 != tt.ipv6Enabled { - t.Errorf("processCIDRs() got1 = %v, want %v", got1, tt.ipv6Enabled) - } }) } } @@ -195,7 +126,7 @@ func Test_processCIDRs(t *testing.T) { func Test_startNodeIpamController(t *testing.T) { type args struct { stopCh <-chan struct{} - cloud cloudprovider.Interface + cloud linodeCloud nodeInformer v1.NodeInformer kubeclient kubernetes.Interface allocateNodeCIDRs bool @@ -211,7 +142,7 @@ func Test_startNodeIpamController(t *testing.T) { name: "allocate-node-cidrs not set", args: args{ stopCh: make(<-chan struct{}), - cloud: nil, + cloud: linodeCloud{}, nodeInformer: nil, kubeclient: nil, allocateNodeCIDRs: false, @@ -220,10 +151,22 @@ func Test_startNodeIpamController(t *testing.T) { wantErr: false, }, { - name: "incorrect cluster-cidrs specified", + name: "allocate-node-cidrs set but cluster-cidr not set", + args: args{ + stopCh: make(<-chan struct{}), + cloud: linodeCloud{}, + nodeInformer: nil, + kubeclient: nil, + allocateNodeCIDRs: true, + clusterCIDR: "", + }, + wantErr: true, + }, + { + name: "incorrect cluster-cidr specified", args: args{ stopCh: make(<-chan struct{}), - cloud: nil, + cloud: linodeCloud{}, nodeInformer: nil, kubeclient: nil, allocateNodeCIDRs: true, @@ -232,26 +175,26 @@ func Test_startNodeIpamController(t *testing.T) { wantErr: true, }, { - name: "more than one ipv4 cidrs specified", + name: "ipv6 cidr specified", args: args{ stopCh: make(<-chan struct{}), - cloud: nil, + cloud: linodeCloud{}, nodeInformer: nil, kubeclient: nil, allocateNodeCIDRs: true, - clusterCIDR: "10.192.0.0/10,192.168.0.0/16", + clusterCIDR: "fd00::/80", }, wantErr: true, }, { - name: "more than two cidrs specified", + name: "more than one cidr specified", args: args{ stopCh: make(<-chan struct{}), - cloud: nil, + cloud: linodeCloud{}, nodeInformer: nil, kubeclient: nil, allocateNodeCIDRs: true, - clusterCIDR: "10.192.0.0/10,fd00::/80,192.168.0.0/16", + clusterCIDR: "10.192.0.0/10,fd00::/80", }, wantErr: true, }, @@ -259,7 +202,7 @@ func Test_startNodeIpamController(t *testing.T) { name: "correct cidrs specified", args: args{ stopCh: make(<-chan struct{}), - cloud: nil, + cloud: linodeCloud{}, nodeInformer: informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Nodes(), kubeclient: kubeClient, allocateNodeCIDRs: true, @@ -278,7 +221,7 @@ func Test_startNodeIpamController(t *testing.T) { Options.AllocateNodeCIDRs = tt.args.allocateNodeCIDRs Options.ClusterCIDRIPv4 = tt.args.clusterCIDR t.Run(tt.name, func(t *testing.T) { - if err := startNodeIpamController(tt.args.stopCh, tt.args.cloud, tt.args.nodeInformer, tt.args.kubeclient); (err != nil) != tt.wantErr { + if err := startNodeIpamController(tt.args.stopCh, &tt.args.cloud, tt.args.nodeInformer, tt.args.kubeclient); (err != nil) != tt.wantErr { t.Errorf("startNodeIpamController() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/cloud/nodeipam/doc.go b/cloud/nodeipam/doc.go new file mode 100644 index 00000000..af4cd15c --- /dev/null +++ b/cloud/nodeipam/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodeipam contains code for syncing cloud instances with +// node registry +package nodeipam diff --git a/cloud/nodeipam/ipam/cidr_allocator.go b/cloud/nodeipam/ipam/cidr_allocator.go new file mode 100644 index 00000000..7b9db985 --- /dev/null +++ b/cloud/nodeipam/ipam/cidr_allocator.go @@ -0,0 +1,158 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "context" + "fmt" + "net" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + informers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" +) + +// CIDRAllocatorType is the type of the allocator to use. +type CIDRAllocatorType string + +const ( + // RangeAllocatorType is the allocator that uses an internal CIDR + // range allocator to do node CIDR range allocations. + RangeAllocatorType CIDRAllocatorType = "RangeAllocator" + // CloudAllocatorType is the allocator that uses cloud platform + // support to do node CIDR range allocations. + CloudAllocatorType CIDRAllocatorType = "CloudAllocator" + // IPAMFromClusterAllocatorType uses the ipam controller sync'ing the node + // CIDR range allocations from the cluster to the cloud. + IPAMFromClusterAllocatorType = "IPAMFromCluster" + // IPAMFromCloudAllocatorType uses the ipam controller sync'ing the node + // CIDR range allocations from the cloud to the cluster. + IPAMFromCloudAllocatorType = "IPAMFromCloud" +) + +// TODO: figure out the good setting for those constants. +const ( + // The amount of time the nodecontroller polls on the list nodes endpoint. + apiserverStartupGracePeriod = 10 * time.Minute + + // The no. of NodeSpec updates NC can process concurrently. + cidrUpdateWorkers = 30 + + // cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it. + cidrUpdateRetries = 3 +) + +// nodePollInterval is used in listing node +var nodePollInterval = 10 * time.Second + +// CIDRAllocator is an interface implemented by things that know how +// to allocate/occupy/recycle CIDR for nodes. 
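+//
+// Of the allocator types declared below, only CloudAllocatorType is actually
+// constructed by New in this package; RangeAllocatorType and the IPAM* types
+// are still declared, but New returns an error for anything other than
+// CloudAllocatorType.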
+type CIDRAllocator interface { + // AllocateOrOccupyCIDR looks at the given node, assigns it a valid + // CIDR if it doesn't currently have one or mark the CIDR as used if + // the node already have one. + AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node) error + // ReleaseCIDR releases the CIDR of the removed node. + ReleaseCIDR(logger klog.Logger, node *v1.Node) error + // Run starts all the working logic of the allocator. + Run(ctx context.Context) +} + +// CIDRAllocatorParams is parameters that's required for creating new +// cidr range allocator. +type CIDRAllocatorParams struct { + // ClusterCIDRs is list of cluster cidrs. + ClusterCIDRs []*net.IPNet + // ServiceCIDR is primary service cidr for cluster. + ServiceCIDR *net.IPNet + // SecondaryServiceCIDR is secondary service cidr for cluster. + SecondaryServiceCIDR *net.IPNet + // NodeCIDRMaskSizes is list of node cidr mask sizes. + NodeCIDRMaskSizes []int + DisableIPv6NodeCIDRAllocation bool +} + +// New creates a new CIDR range allocator. +func New(ctx context.Context, linodeClient client.Client, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { + nodeList, err := listNodes(ctx, kubeClient) + if err != nil { + return nil, err + } + + switch allocatorType { + case CloudAllocatorType: + return NewLinodeCIDRAllocator(ctx, linodeClient, kubeClient, nodeInformer, allocatorParams, nodeList) + case RangeAllocatorType: + return nil, fmt.Errorf("RangeAllocatorType is not supported") + default: + return nil, fmt.Errorf("invalid or unsupported CIDR allocator type: %v", allocatorType) + } +} + +func listNodes(ctx context.Context, kubeClient clientset.Interface) (*v1.NodeList, error) { + var nodeList *v1.NodeList + logger := klog.FromContext(ctx) + + // We must poll because apiserver might not be up. This error causes + // controller manager to restart. + if pollErr := wait.PollUntilContextTimeout(ctx, nodePollInterval, apiserverStartupGracePeriod, true, func(ctx context.Context) (bool, error) { + var err error + nodeList, err = kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + FieldSelector: fields.Everything().String(), + LabelSelector: labels.Everything().String(), + }) + if err != nil { + logger.Error(err, "Failed to list all nodes") + return false, nil + } + return true, nil + }); pollErr != nil { + return nil, fmt.Errorf("failed to list all nodes in %v, cannot proceed without updating CIDR map", + apiserverStartupGracePeriod) + } + return nodeList, nil +} + +// ipnetToStringList converts a slice of net.IPNet into a list of CIDR in string format +func ipnetToStringList(inCIDRs []*net.IPNet) []string { + outCIDRs := make([]string, len(inCIDRs)) + for idx, inCIDR := range inCIDRs { + outCIDRs[idx] = inCIDR.String() + } + return outCIDRs +} + +// occupyServiceCIDR removes the service CIDR range from the cluster CIDR if it +// intersects. +func occupyServiceCIDR(set *cidrset.CidrSet, clusterCIDR, serviceCIDR *net.IPNet) error { + if clusterCIDR.Contains(serviceCIDR.IP) || serviceCIDR.Contains(clusterCIDR.IP) { + if err := set.Occupy(serviceCIDR); err != nil { + return err + } + } + return nil +} diff --git a/cloud/nodeipam/ipam/cloud_allocator.go b/cloud/nodeipam/ipam/cloud_allocator.go new file mode 100644 index 00000000..0761d5ae --- /dev/null +++ b/cloud/nodeipam/ipam/cloud_allocator.go @@ -0,0 +1,546 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/linode/linodego" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + informers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + nodeutil "k8s.io/component-helpers/node/util" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + controllerutil "k8s.io/kubernetes/pkg/controller/util/node" + netutils "k8s.io/utils/net" + + linode "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" +) + +type cloudAllocator struct { + client clientset.Interface + + linodeClient linode.Client + // cluster cidr as passed in during controller creation for ipv4 addresses + clusterCIDR *net.IPNet + // for clusterCIDR we maintain what is used and what is not + cidrSet *cidrset.CidrSet + // nodeLister is able to list/get nodes and is populated by the shared informer passed to controller + nodeLister corelisters.NodeLister + // nodesSynced returns true if the node shared informer has been synced at least once. + nodesSynced cache.InformerSynced + broadcaster record.EventBroadcaster + recorder record.EventRecorder + + // queues are where incoming work is placed to de-dup and to allow "easy" + // rate limited requeues on errors + queue workqueue.TypedRateLimitingInterface[any] + + // nodeCIDRMaskSizeIPv6 is the mask size for the IPv6 CIDR assigned to nodes. + nodeCIDRMaskSizeIPv6 int + // disableIPv6NodeCIDRAllocation is true if we should not allocate IPv6 CIDRs for nodes. + disableIPv6NodeCIDRAllocation bool +} + +const providerIDPrefix = "linode://" + +var _ CIDRAllocator = &cloudAllocator{} + +// NewLinodeCIDRAllocator returns a CIDRAllocator to allocate CIDRs for node +// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size. +// Caller must always pass in a list of existing nodes so the new allocator. +// Caller must ensure that ClusterCIDR is semantically correct +// can initialize its CIDR map. NodeList is only nil in testing. 
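+//
+// Illustrative parameters only (ipv4CIDR is a placeholder; real values come
+// from --cluster-cidr and Options.NodeCIDRMaskSizeIPv4/IPv6, with 10.10.0.0/16
+// and {24, 112} used in the tests below):
+//
+//	CIDRAllocatorParams{
+//		ClusterCIDRs:      []*net.IPNet{ipv4CIDR},
+//		NodeCIDRMaskSizes: []int{24, 112}, // per-node IPv4 /24 and IPv6 /112
+//	}
+//
+// Each node then gets one IPv4 /24 from the cluster CIDR plus an IPv6 /112
+// derived from its Linode VPC interface range, unless
+// DisableIPv6NodeCIDRAllocation is set.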
+func NewLinodeCIDRAllocator(ctx context.Context, linodeClient linode.Client, client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) { + logger := klog.FromContext(ctx) + if client == nil { + logger.Error(nil, "kubeClient is nil when starting CIDRAllocator") + klog.FlushAndExit(klog.ExitFlushTimeout, 1) + } + + eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) + + // create a cidrSet for ipv4 cidr we operate on + cidrSet, err := cidrset.NewCIDRSet(allocatorParams.ClusterCIDRs[0], allocatorParams.NodeCIDRMaskSizes[0]) + if err != nil { + return nil, err + } + + ca := &cloudAllocator{ + client: client, + linodeClient: linodeClient, + clusterCIDR: allocatorParams.ClusterCIDRs[0], + cidrSet: cidrSet, + nodeLister: nodeInformer.Lister(), + nodesSynced: nodeInformer.Informer().HasSynced, + broadcaster: eventBroadcaster, + recorder: recorder, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any](), "cidrallocator_node"), + nodeCIDRMaskSizeIPv6: allocatorParams.NodeCIDRMaskSizes[1], + disableIPv6NodeCIDRAllocation: allocatorParams.DisableIPv6NodeCIDRAllocation, + } + + if allocatorParams.ServiceCIDR != nil { + ca.filterOutServiceRange(logger, allocatorParams.ServiceCIDR) + } else { + logger.Info("No Service CIDR provided. Skipping filtering out service addresses") + } + + if allocatorParams.SecondaryServiceCIDR != nil { + ca.filterOutServiceRange(logger, allocatorParams.SecondaryServiceCIDR) + } else { + logger.Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses") + } + + if nodeList != nil { + for _, node := range nodeList.Items { + if len(node.Spec.PodCIDRs) == 0 { + logger.V(4).Info("Node has no CIDR, ignoring", "node", klog.KObj(&node)) + continue + } + logger.V(4).Info("Node has CIDR, occupying it in CIDR map", "node", klog.KObj(&node), "podCIDR", node.Spec.PodCIDR) + if err := ca.occupyCIDRs(ctx, &node); err != nil { + // This will happen if: + // 1. We find garbage in the podCIDRs field. Retrying is useless. + // 2. CIDR out of range: This means a node CIDR has changed. + // This error will keep crashing controller-manager. + return nil, err + } + } + } + + if _, err := nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err == nil { + ca.queue.Add(key) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + ca.queue.Add(key) + } + }, + DeleteFunc: func(obj interface{}) { + // The informer cache no longer has the object, and since Node doesn't have a finalizer, + // we don't see the Update with DeletionTimestamp != 0. 
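+ // Handle both a plain *v1.Node and a DeletedFinalStateUnknown tombstone
+ // so that ReleaseCIDR still runs for stale delete events.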
+ node, ok := obj.(*v1.Node) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) + return + } + node, ok = tombstone.Obj.(*v1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object types: %v", obj)) + return + } + } + if err := ca.ReleaseCIDR(logger, node); err != nil { + utilruntime.HandleError(fmt.Errorf("error while processing CIDR Release: %w", err)) + } + }, + }); err != nil { + logger.Error(err, "Failed to add event handler to node informer") + return nil, err + } + + return ca, nil +} + +func (c *cloudAllocator) Run(ctx context.Context) { + defer utilruntime.HandleCrash() + + // Start event processing pipeline. + c.broadcaster.StartStructuredLogging(3) + logger := klog.FromContext(ctx) + logger.Info("Sending events to api server") + c.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")}) + defer c.broadcaster.Shutdown() + + defer c.queue.ShutDown() + + logger.Info("Starting linode's cloud CIDR allocator") + defer logger.Info("Shutting down linode's cloud CIDR allocator") + + if !cache.WaitForNamedCacheSync("cidrallocator", ctx.Done(), c.nodesSynced) { + return + } + + for i := 0; i < cidrUpdateWorkers; i++ { + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + } + + <-ctx.Done() +} + +// runWorker is a long-running function that will continually call the +// processNextWorkItem function in order to read and process a message on the +// queue. +func (c *cloudAllocator) runWorker(ctx context.Context) { + for c.processNextNodeWorkItem(ctx) { + } +} + +// processNextWorkItem will read a single work item off the queue and +// attempt to process it, by calling the syncHandler. +func (c *cloudAllocator) processNextNodeWorkItem(ctx context.Context) bool { + obj, shutdown := c.queue.Get() + if shutdown { + return false + } + + // We wrap this block in a func so we can defer r.queue.Done. + err := func(logger klog.Logger, obj interface{}) error { + // We call Done here so the workNodeQueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the queue and attempted again after a back-off + // period. + defer c.queue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workNodeQueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workNodeQueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workNodeQueue. + if key, ok = obj.(string); !ok { + // As the item in the workNodeQueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.queue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workNodeQueue but got %#v", obj)) + return nil + } + // Run the syncHandler, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.syncNode(ctx, key); err != nil { + // Put the item back on the queue to handle any transient errors. + c.queue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queue again until another change happens. 
+ c.queue.Forget(obj) + logger.V(4).Info("Successfully synced", "key", key) + return nil + }(klog.FromContext(ctx), obj) + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +func (c *cloudAllocator) syncNode(ctx context.Context, key string) error { + logger := klog.FromContext(ctx) + startTime := time.Now() + defer func() { + logger.V(4).Info("Finished syncing Node request", "node", key, "elapsed", time.Since(startTime)) + }() + + node, err := c.nodeLister.Get(key) + if apierrors.IsNotFound(err) { + logger.V(3).Info("node has been deleted", "node", key) + // TODO: obtain the node object information to call ReleaseCIDR from here + // and retry if there is an error. + return nil + } + if err != nil { + return err + } + // Check the DeletionTimestamp to determine if object is under deletion. + if !node.DeletionTimestamp.IsZero() { + logger.V(3).Info("node is being deleted", "node", key) + return nil + } + return c.AllocateOrOccupyCIDR(ctx, node) +} + +// marks node.PodCIDRs[...] as used in allocator's tracked cidrSet +func (c *cloudAllocator) occupyCIDRs(ctx context.Context, node *v1.Node) error { + if len(node.Spec.PodCIDRs) == 0 { + return nil + } + logger := klog.FromContext(ctx) + for idx, cidr := range node.Spec.PodCIDRs { + _, podCIDR, err := netutils.ParseCIDRSloppy(cidr) + if err != nil { + return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, cidr) + } + // IPv6 CIDRs are allocated from node-specific ranges + // We don't track them in the cidrSet + if podCIDR.IP.To4() == nil { + logger.V(4).Info("Nothing to occupy for IPv6 CIDR", "cidr", podCIDR) + return nil + } + // If node has a pre allocate cidr that does not exist in our cidrs. + // This will happen if cluster went from dualstack(multi cidrs) to non-dualstack + // then we have now way of locking it + if idx >= 1 { + return fmt.Errorf("node:%s has an allocated cidr: %v at index:%v that does not exist in cluster cidrs configuration", node.Name, cidr, idx) + } + + if err := c.cidrSet.Occupy(podCIDR); err != nil { + return fmt.Errorf("failed to mark cidr[%v] at idx [%v] as occupied for node: %v: %w", podCIDR, idx, node.Name, err) + } + } + return nil +} + +// getIPv6RangeFromInterface extracts the IPv6 range from a Linode instance configuration interface. +func getIPv6RangeFromInterface(iface linodego.InstanceConfigInterface) string { + if ipv6 := iface.IPv6; ipv6 != nil { + if len(ipv6.SLAAC) > 0 { + return ipv6.SLAAC[0].Range + } + if len(ipv6.Ranges) > 0 { + return ipv6.Ranges[0].Range + } + } + return "" +} + +// allocateIPv6CIDR allocates an IPv6 CIDR for the given node. +// It retrieves the instance configuration for the node and extracts the IPv6 range. +// It then creates a new net.IPNet with the IPv6 address and mask size defined +// by nodeCIDRMaskSizeIPv6. The function returns an error if it fails to retrieve +// the instance configuration or parse the IPv6 range. 
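+//
+// For example (mirroring the test fixtures in this package): if the VPC
+// interface reports the range 2300:5800:2:1::/64 and nodeCIDRMaskSizeIPv6 is
+// the default 112, the node is assigned the pod CIDR 2300:5800:2:1::/112.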
+func (c *cloudAllocator) allocateIPv6CIDR(ctx context.Context, node *v1.Node) (*net.IPNet, error) { + if node.Spec.ProviderID == "" { + return nil, fmt.Errorf("node %s has no ProviderID set, cannot calculate ipv6 range for it", node.Name) + } + // Extract the Linode ID from the ProviderID + if !strings.HasPrefix(node.Spec.ProviderID, providerIDPrefix) { + return nil, fmt.Errorf("node %s has invalid ProviderID %s, expected prefix '%s'", node.Name, node.Spec.ProviderID, providerIDPrefix) + } + // Parse the Linode ID from the ProviderID + id, err := strconv.Atoi(strings.TrimPrefix(node.Spec.ProviderID, providerIDPrefix)) + if err != nil { + return nil, fmt.Errorf("failed to parse Linode ID from ProviderID %s: %w", node.Spec.ProviderID, err) + } + // Retrieve the instance configuration for the Linode ID + configs, err := c.linodeClient.ListInstanceConfigs(ctx, id, &linodego.ListOptions{}) + if err != nil || len(configs) == 0 { + return nil, fmt.Errorf("failed to list instance configs: %w", err) + } + + ipv6Range := "" + for _, iface := range configs[0].Interfaces { + if iface.Purpose == linodego.InterfacePurposeVPC { + ipv6Range = getIPv6RangeFromInterface(iface) + if ipv6Range != "" { + break + } + } + } + + if ipv6Range == "" { + return nil, fmt.Errorf("failed to find ipv6 range in instance config: %v", configs[0]) + } + + ip, _, err := net.ParseCIDR(ipv6Range) + if err != nil { + return nil, fmt.Errorf("failed parsing ipv6 range %s: %w", ipv6Range, err) + } + + mask := net.CIDRMask(c.nodeCIDRMaskSizeIPv6, 128) + ipv6Embedded := &net.IPNet{ + IP: ip.Mask(mask), + Mask: mask, + } + + return ipv6Embedded, nil +} + +// WARNING: If you're adding any return calls or defer any more work from this +// function you have to make sure to update nodesInProcessing properly with the +// disposition of the node when the work is done. +func (c *cloudAllocator) AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node) error { + if node == nil { + return nil + } + + if len(node.Spec.PodCIDRs) > 0 { + return c.occupyCIDRs(ctx, node) + } + + logger := klog.FromContext(ctx) + allocatedCIDRs := make([]*net.IPNet, 2) + + podCIDR, err := c.cidrSet.AllocateNext() + if err != nil { + controllerutil.RecordNodeStatusChange(logger, c.recorder, node, "CIDRNotAvailable") + return fmt.Errorf("failed to allocate cidr from cluster cidr: %w", err) + } + allocatedCIDRs[0] = podCIDR + + // If IPv6 CIDR allocation is disabled, log and return early. + if c.disableIPv6NodeCIDRAllocation { + logger.V(4).Info("IPv6 CIDR allocation disabled; using only IPv4", "node", klog.KObj(node)) + // remove the second CIDR from the allocatedCIDRs slice + // since we are not allocating IPv6 CIDR + allocatedCIDRs = allocatedCIDRs[:1] + return c.enqueueCIDRUpdate(ctx, node.Name, allocatedCIDRs) + } + // Allocate IPv6 CIDR for the node. + logger.V(4).Info("Allocating IPv6 CIDR", "node", klog.KObj(node)) + if allocatedCIDRs[1], err = c.allocateIPv6CIDR(ctx, node); err != nil { + return fmt.Errorf("failed to assign IPv6 CIDR: %w", err) + } + + return c.enqueueCIDRUpdate(ctx, node.Name, allocatedCIDRs) +} + +// enqueueCIDRUpdate adds the node name and CIDRs to the work queue for processing. +func (c *cloudAllocator) enqueueCIDRUpdate(ctx context.Context, nodeName string, cidrs []*net.IPNet) error { + logger := klog.FromContext(ctx) + logger.V(4).Info("Putting node with CIDR into the work queue", "node", nodeName, "CIDR", cidrs) + return c.updateCIDRsAllocation(ctx, nodeName, cidrs) +} + +// ReleaseCIDR marks node.podCIDRs[...] 
as unused in our tracked cidrSets +func (c *cloudAllocator) ReleaseCIDR(logger klog.Logger, node *v1.Node) error { + if node == nil || len(node.Spec.PodCIDRs) == 0 { + return nil + } + + for idx, cidr := range node.Spec.PodCIDRs { + _, podCIDR, err := netutils.ParseCIDRSloppy(cidr) + if err != nil { + return fmt.Errorf("failed to parse CIDR %s on Node %v: %w", cidr, node.Name, err) + } + if podCIDR.IP.To4() == nil { + logger.V(4).Info("Nothing to release for IPv6 CIDR", "cidr", podCIDR) + continue + } + + // If node has a pre allocate cidr that does not exist in our cidrs. + // This will happen if cluster went from dualstack(multi cidrs) to non-dualstack + // then we have now way of locking it + if idx >= 1 { + return fmt.Errorf("node:%s has an allocated cidr: %v at index:%v that does not exist in cluster cidrs configuration", node.Name, cidr, idx) + } + + logger.V(4).Info("Release CIDR for node", "CIDR", cidr, "node", klog.KObj(node)) + if err = c.cidrSet.Release(podCIDR); err != nil { + return fmt.Errorf("error when releasing CIDR %v: %w", cidr, err) + } + } + return nil +} + +// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used across all cidrs +// so that they won't be assignable. +func (c *cloudAllocator) filterOutServiceRange(logger klog.Logger, serviceCIDR *net.IPNet) { + // Checks if service CIDR has a nonempty intersection with cluster + // CIDR. It is the case if either clusterCIDR contains serviceCIDR with + // clusterCIDR's Mask applied (this means that clusterCIDR contains + // serviceCIDR) or vice versa (which means that serviceCIDR contains + // clusterCIDR). + // if they don't overlap then ignore the filtering + if !c.clusterCIDR.Contains(serviceCIDR.IP.Mask(c.clusterCIDR.Mask)) && !serviceCIDR.Contains(c.clusterCIDR.IP.Mask(serviceCIDR.Mask)) { + return + } + + // at this point, len(cidrSet) == len(clusterCidr) + if err := c.cidrSet.Occupy(serviceCIDR); err != nil { + logger.Error(err, "Error filtering out service cidr out cluster cidr", "CIDR", c.clusterCIDR, "serviceCIDR", serviceCIDR) + } +} + +// updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server. +func (c *cloudAllocator) updateCIDRsAllocation(ctx context.Context, nodeName string, allocatedCIDRs []*net.IPNet) error { + var err error + var node *v1.Node + logger := klog.FromContext(ctx) + cidrsString := ipnetToStringList(allocatedCIDRs) + node, err = c.nodeLister.Get(nodeName) + if err != nil { + logger.Error(err, "Failed while getting node for updating Node.Spec.PodCIDRs", "node", klog.KRef("", nodeName)) + return err + } + + // if cidr list matches the proposed. + // then we possibly updated this node + // and just failed to ack the success. + if len(node.Spec.PodCIDRs) == len(allocatedCIDRs) { + match := true + for idx, cidr := range cidrsString { + if node.Spec.PodCIDRs[idx] != cidr { + match = false + break + } + } + if match { + logger.V(4).Info("Node already has allocated CIDR. It matches the proposed one", "node", klog.KObj(node), "CIDRs", allocatedCIDRs) + return nil + } + } + + // node has cidrs, release the reserved + if len(node.Spec.PodCIDRs) != 0 { + logger.Error(nil, "Node already has a CIDR allocated. Releasing the new one", "node", klog.KObj(node), "podCIDRs", node.Spec.PodCIDRs) + if releaseErr := c.cidrSet.Release(allocatedCIDRs[0]); releaseErr != nil { + logger.Error(releaseErr, "Error when releasing CIDR", "CIDR", allocatedCIDRs[0]) + } + return nil + } + + // If we reached here, it means that the node has no CIDR currently assigned. So we set it. 
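+ // The patch below is retried up to cidrUpdateRetries (3) times. On persistent
+ // failure the allocated IPv4 CIDR is released back to the cidrSet, except on
+ // server timeouts, where it is deliberately leaked and reclaimed on the next
+ // controller restart.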
+ for i := 0; i < cidrUpdateRetries; i++ { + if err = nodeutil.PatchNodeCIDRs(ctx, c.client, types.NodeName(node.Name), cidrsString); err == nil { + logger.Info("Set node PodCIDR", "node", klog.KObj(node), "podCIDRs", cidrsString) + return nil + } + } + // failed release back to the pool + logger.Error(err, "Failed to update node PodCIDR after multiple attempts", "node", klog.KObj(node), "podCIDRs", cidrsString) + controllerutil.RecordNodeStatusChange(logger, c.recorder, node, "CIDRAssignmentFailed") + // We accept the fact that we may leak CIDRs here. This is safer than releasing + // them in case when we don't know if request went through. + // NodeController restart will return all falsely allocated CIDRs to the pool. + if !apierrors.IsServerTimeout(err) { + logger.Error(err, "CIDR assignment for node failed. Releasing allocated CIDR", "node", klog.KObj(node)) + if releaseErr := c.cidrSet.Release(allocatedCIDRs[0]); releaseErr != nil { + logger.Error(releaseErr, "Error releasing allocated CIDR for node", "node", klog.KObj(node)) + } + } + return err +} diff --git a/cloud/nodeipam/ipam/cloud_allocator_test.go b/cloud/nodeipam/ipam/cloud_allocator_test.go new file mode 100644 index 00000000..6959423a --- /dev/null +++ b/cloud/nodeipam/ipam/cloud_allocator_test.go @@ -0,0 +1,884 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/appscode/go/wait" + "github.com/golang/mock/gomock" + "github.com/linode/linodego" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" + "k8s.io/kubernetes/pkg/controller/testutil" + "k8s.io/kubernetes/test/utils/ktesting" + netutils "k8s.io/utils/net" + "k8s.io/utils/ptr" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" +) + +type testCase struct { + description string + linodeClient *mocks.MockClient + fakeNodeHandler *testutil.FakeNodeHandler + allocatorParams CIDRAllocatorParams + // key is index of the cidr allocated + expectedAllocatedCIDR map[int]string + allocatedCIDRs map[int][]string + // should controller creation fail? 
+ ctrlCreateFail bool +} + +func TestOccupyPreExistingCIDR(t *testing.T) { + // all tests operate on a single node + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + testCases := []testCase{ + { + description: "success, single stack no node allocation", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 112}, + }, + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: false, + }, + { + description: "success, single stack correct node allocation", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.1.0/24"}, + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 112}, + }, + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: false, + }, + // failure cases + { + description: "fail, single stack incorrect node allocation", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"172.10.1.0/24"}, + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 112}, + }, + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: true, + }, + } + + // test function + tCtx := ktesting.Init(t) + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + // Initialize the cloud allocator. 
+ fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) + nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) + _, err := NewLinodeCIDRAllocator(tCtx, tc.linodeClient, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) + if err == nil && tc.ctrlCreateFail { + t.Fatalf("creating cloud allocator was expected to fail, but it did not") + } + if err != nil && !tc.ctrlCreateFail { + t.Fatalf("creating cloud allocator was expected to succeed, but it did not") + } + }) + } +} + +func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { + // Non-parallel test (overrides global var) + oldNodePollInterval := nodePollInterval + nodePollInterval = test.NodePollInterval + defer func() { + nodePollInterval = oldNodePollInterval + }() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + // all tests operate on a single node + testCases := []testCase{ + { + description: "When there's no ServiceCIDR return first CIDR in range", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.211", + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30, 112}, + }, + expectedAllocatedCIDR: map[int]string{ + 0: "127.123.234.0/30", + 1: "2300:5800:2:1::/112", + }, + }, + { + description: "Correctly filter out ServiceCIDR", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.211", + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30, 112}, + }, + // it should return first /30 CIDR after service range + expectedAllocatedCIDR: map[int]string{ + 0: "127.123.234.64/30", + 1: "2300:5800:2:1::/112", + }, + }, + { + description: "Correctly ignore already allocated CIDRs", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.211", + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: func() *net.IPNet { + _, 
serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30, 112}, + }, + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"}, + }, + expectedAllocatedCIDR: map[int]string{ + 0: "127.123.234.76/30", + 1: "2300:5800:2:1::/112", + }, + }, + { + description: "no double counting", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.0.0/24"}, + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.202", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.2.0/24"}, + ProviderID: fmt.Sprintf("%s22345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.201", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s32345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.211", + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.10.0.0/22") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 112}, + }, + expectedAllocatedCIDR: map[int]string{ + 0: "10.10.1.0/24", + 1: "2300:5800:2:1::/112", + }, + }, + } + + // test function + _, tCtx := ktesting.NewTestContext(t) + testFunc := func(tc testCase) { + fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) + nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) + tc.linodeClient.EXPECT().ListInstanceConfigs(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return([]linodego.InstanceConfig{ + { + Interfaces: []linodego.InstanceConfigInterface{ + { + VPCID: ptr.To(12345), + Purpose: linodego.InterfacePurposeVPC, + IPv6: &linodego.InstanceConfigInterfaceIPv6{ + Ranges: []linodego.InstanceConfigInterfaceIPv6Range{ + { + Range: "2300:5800:2:1::/64", + }, + }, + }, + }, + }, + }, + }, nil) + allocator, err := NewLinodeCIDRAllocator(tCtx, tc.linodeClient, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) + if err != nil { + t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) + return + } + cidrAllocator, ok := allocator.(*cloudAllocator) + if !ok { + t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) + return + } + cidrAllocator.nodesSynced = test.AlwaysReady + cidrAllocator.recorder = testutil.NewFakeRecorder() + go allocator.Run(tCtx) + + // this is a bit of white box testing + // pre allocate the cidrs as per the test + for _, allocatedList := range tc.allocatedCIDRs { + for _, allocated := range allocatedList { + _, cidr, err := netutils.ParseCIDRSloppy(allocated) + if err != nil { + t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) + } + if err = cidrAllocator.cidrSet.Occupy(cidr); err != nil { + t.Fatalf("%v: unexpected error when 
occupying CIDR %v: %v", tc.description, allocated, err) + } + } + } + + updateCount := 0 + for _, node := range tc.fakeNodeHandler.Existing { + if node.Spec.PodCIDRs == nil { + updateCount++ + } + if err := allocator.AllocateOrOccupyCIDR(tCtx, node); err != nil { + t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) + } + } + if updateCount != 1 { + t.Fatalf("test error: all tests must update exactly one node") + } + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { + t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) + } + + if len(tc.expectedAllocatedCIDR) == 0 { + // nothing further expected + return + } + for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { + if len(updatedNode.Spec.PodCIDRs) == 0 { + continue // not assigned yet + } + // match + for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { + if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { + t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) + break + } + } + } + } + + // run the test cases + for _, tc := range testCases { + testFunc(tc) + } +} + +func TestAllocateOrOccupyCIDRFailure(t *testing.T) { + testCases := []testCase{ + { + description: "When there's no ServiceCIDR return first CIDR in range", + linodeClient: nil, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30, 112}, + }, + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, + }, + }, + } + _, tCtx := ktesting.NewTestContext(t) + testFunc := func(tc testCase) { + // Initialize the cloud allocator. 
+ allocator, err := NewLinodeCIDRAllocator(tCtx, tc.linodeClient, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) + if err != nil { + t.Logf("%v: failed to create NewLinodeCIDRAllocator with error %v", tc.description, err) + } + cloudAllocator, ok := allocator.(*cloudAllocator) + if !ok { + t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) + return + } + cloudAllocator.nodesSynced = test.AlwaysReady + cloudAllocator.recorder = testutil.NewFakeRecorder() + go allocator.Run(tCtx) + + // this is a bit of white box testing + for _, allocatedList := range tc.allocatedCIDRs { + for _, allocated := range allocatedList { + _, cidr, err := netutils.ParseCIDRSloppy(allocated) + if err != nil { + t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, cidr, err) + } + err = cloudAllocator.cidrSet.Occupy(cidr) + if err != nil { + t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, cidr, err) + } + } + } + if err := allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]); err == nil { + t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) + } + // We don't expect any updates, so just sleep for some time + time.Sleep(time.Second) + if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { + t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) + } + if len(tc.expectedAllocatedCIDR) == 0 { + // nothing further expected + return + } + for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { + if len(updatedNode.Spec.PodCIDRs) == 0 { + continue // not assigned yet + } + // match + for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { + if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR { + t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) + break + } + } + } + } + for _, tc := range testCases { + testFunc(tc) + } +} + +type releaseTestCase struct { + description string + linodeClient *mocks.MockClient + fakeNodeHandler *testutil.FakeNodeHandler + allocatorParams CIDRAllocatorParams + expectedAllocatedCIDRFirstRound map[int]string + expectedAllocatedCIDRSecondRound map[int]string + allocatedCIDRs map[int][]string + cidrsToRelease [][]string +} + +func TestReleaseCIDRSuccess(t *testing.T) { + // Non-parallel test (overrides global var) + oldNodePollInterval := nodePollInterval + nodePollInterval = test.NodePollInterval + defer func() { + nodePollInterval = oldNodePollInterval + }() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + testCases := []releaseTestCase{ + { + description: "Correctly release preallocated CIDR", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.211", + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30, 112}, + }, 
+ allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, + }, + expectedAllocatedCIDRFirstRound: nil, + cidrsToRelease: [][]string{ + {"127.123.234.4/30"}, + }, + expectedAllocatedCIDRSecondRound: map[int]string{ + 0: "127.123.234.4/30", + }, + }, + { + description: "Correctly recycle CIDR", + linodeClient: client, + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "172.234.236.211", + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30, 112}, + }, + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, + }, + expectedAllocatedCIDRFirstRound: map[int]string{ + 0: "127.123.234.0/30", + }, + cidrsToRelease: [][]string{ + {"127.123.234.0/30"}, + }, + expectedAllocatedCIDRSecondRound: map[int]string{ + 0: "127.123.234.0/30", + }, + }, + } + logger, tCtx := ktesting.NewTestContext(t) + testFunc := func(tc releaseTestCase) { + tc.linodeClient.EXPECT().ListInstanceConfigs(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return([]linodego.InstanceConfig{ + { + Interfaces: []linodego.InstanceConfigInterface{ + { + VPCID: ptr.To(12345), + Purpose: linodego.InterfacePurposeVPC, + IPv6: &linodego.InstanceConfigInterfaceIPv6{ + Ranges: []linodego.InstanceConfigInterfaceIPv6Range{ + { + Range: "2300:5800:2:1::/64", + }, + }, + }, + }, + }, + }, + }, nil) + allocator, _ := NewLinodeCIDRAllocator(tCtx, tc.linodeClient, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) + rangeAllocator, ok := allocator.(*cloudAllocator) + if !ok { + t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) + return + } + rangeAllocator.nodesSynced = test.AlwaysReady + rangeAllocator.recorder = testutil.NewFakeRecorder() + go allocator.Run(tCtx) + + // this is a bit of white box testing + for _, allocatedList := range tc.allocatedCIDRs { + for _, allocated := range allocatedList { + _, cidr, err := netutils.ParseCIDRSloppy(allocated) + if err != nil { + t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) + } + err = rangeAllocator.cidrSet.Occupy(cidr) + if err != nil { + t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err) + } + } + } + + err := allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]) + if len(tc.expectedAllocatedCIDRFirstRound) != 0 { + if err != nil { + t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) + } + if err = test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { + t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) + } + } else { + if err == nil { + t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) + } + // We don't expect any updates here + time.Sleep(time.Second) + if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { + t.Fatalf("%v: 
unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) + } + } + for _, cidrToRelease := range tc.cidrsToRelease { + nodeToRelease := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + } + nodeToRelease.Spec.PodCIDRs = cidrToRelease + err = allocator.ReleaseCIDR(logger, &nodeToRelease) + if err != nil { + t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err) + } + } + if err = allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]); err != nil { + t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) + } + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { + t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) + } + + if len(tc.expectedAllocatedCIDRSecondRound) == 0 { + // nothing further expected + return + } + for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { + if len(updatedNode.Spec.PodCIDRs) == 0 { + continue // not assigned yet + } + // match + for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound { + if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { + t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) + break + } + } + } + } + + for _, tc := range testCases { + testFunc(tc) + } +} + +func TestNodeDeletionReleaseCIDR(t *testing.T) { + _, clusterCIDRv4, err := netutils.ParseCIDRSloppy("10.10.0.0/16") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + _, allocatedCIDR, err := netutils.ParseCIDRSloppy("10.10.0.0/24") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testCases := []struct { + description string + nodeKey string + existingNodes []*v1.Node + shouldReleaseCIDR bool + }{ + { + description: "Regular node not under deletion", + nodeKey: "node0", + existingNodes: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + PodCIDR: allocatedCIDR.String(), + PodCIDRs: []string{allocatedCIDR.String()}, + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + }, + }, + shouldReleaseCIDR: false, + }, + { + description: "Node under deletion", + nodeKey: "node0", + existingNodes: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Spec: v1.NodeSpec{ + PodCIDR: allocatedCIDR.String(), + PodCIDRs: []string{allocatedCIDR.String()}, + ProviderID: fmt.Sprintf("%s12345", providerIDPrefix), + }, + }, + }, + shouldReleaseCIDR: false, + }, + { + description: "Node deleted", + nodeKey: "node0", + existingNodes: []*v1.Node{}, + shouldReleaseCIDR: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + allocatorParams := CIDRAllocatorParams{ + ClusterCIDRs: []*net.IPNet{clusterCIDRv4}, ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 112}, + } + + fakeNodeHandler := &testutil.FakeNodeHandler{ + Existing: tc.existingNodes, + Clientset: fake.NewSimpleClientset(), + } + _, tCtx := ktesting.NewTestContext(t) + + fakeNodeInformer := test.FakeNodeInformer(fakeNodeHandler) + nodeList, err := fakeNodeHandler.List(tCtx, metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to get list of nodes %v", err) + } + allocator, err := NewLinodeCIDRAllocator(tCtx, nil, fakeNodeHandler, fakeNodeInformer, allocatorParams, nodeList) + if err != nil { + t.Fatalf("failed 
to create NewLinodeCIDRAllocator: %v", err) + } + cloudAllocator, ok := allocator.(*cloudAllocator) + if !ok { + t.Fatalf("found non-default implementation of CIDRAllocator") + } + cloudAllocator.nodesSynced = test.AlwaysReady + cloudAllocator.recorder = testutil.NewFakeRecorder() + + if err = cloudAllocator.syncNode(tCtx, tc.nodeKey); err != nil { + t.Fatalf("failed to run rangeAllocator.syncNode") + } + + // if the allocated CIDR was released we expect the nextAllocated CIDR to be the same + nextCIDR, err := cloudAllocator.cidrSet.AllocateNext() + if err != nil { + t.Fatalf("unexpected error trying to allocate next CIDR: %v", err) + } + expectedCIDR := "10.10.1.0/24" // existing allocated is 10.0.0.0/24 + if tc.shouldReleaseCIDR { + expectedCIDR = allocatedCIDR.String() // if cidr was released we expect to reuse it. ie: 10.0.0.0/24 + } + if nextCIDR.String() != expectedCIDR { + t.Fatalf("Expected CIDR %s to be allocated next, but got: %v", expectedCIDR, nextCIDR.String()) + } + }) + } +} diff --git a/cloud/nodeipam/ipam/controller_test.go b/cloud/nodeipam/ipam/controller_test.go new file mode 100644 index 00000000..cf497f60 --- /dev/null +++ b/cloud/nodeipam/ipam/controller_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "errors" + "net" + "testing" + + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" +) + +func TestOccupyServiceCIDR(t *testing.T) { + const clusterCIDR = "10.1.0.0/16" + +TestCase: + for _, tc := range []struct { + serviceCIDR string + }{ + {"10.0.255.0/24"}, + {"10.1.0.0/24"}, + {"10.1.255.0/24"}, + {"10.2.0.0/24"}, + } { + serviceCIDR := test.MustParseCIDR(tc.serviceCIDR) + set, err := cidrset.NewCIDRSet(test.MustParseCIDR(clusterCIDR), 24) + if err != nil { + t.Errorf("test case %+v: NewCIDRSet() = %v, want nil", tc, err) + } + if err := occupyServiceCIDR(set, test.MustParseCIDR(clusterCIDR), serviceCIDR); err != nil { + t.Errorf("test case %+v: occupyServiceCIDR() = %v, want nil", tc, err) + } + // Allocate until full. + var cidrs []*net.IPNet + for { + cidr, err := set.AllocateNext() + if err != nil { + if errors.Is(err, cidrset.ErrCIDRRangeNoCIDRsRemaining) { + break + } + t.Errorf("set.AllocateNext() = %v, want %v", err, cidrset.ErrCIDRRangeNoCIDRsRemaining) + continue TestCase + } + cidrs = append(cidrs, cidr) + } + // No allocated CIDR range should intersect with serviceCIDR. + for _, c := range cidrs { + if c.Contains(serviceCIDR.IP) || serviceCIDR.Contains(c.IP) { + t.Errorf("test case %+v: allocated CIDR %v from service range", tc, c) + } + } + } +} diff --git a/cloud/nodeipam/ipam/doc.go b/cloud/nodeipam/ipam/doc.go new file mode 100644 index 00000000..eab481f9 --- /dev/null +++ b/cloud/nodeipam/ipam/doc.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ipam provides different allocators for assigning IP ranges to nodes. +// We currently support several kinds of IPAM allocators (these are denoted by +// the CIDRAllocatorType): +// - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works +// in conjunction with the RouteController to configure the network to get +// connectivity. +// - CloudAllocator is an allocator that synchronizes PodCIDRs from IP +// ranges assignments from the underlying cloud platform. +// - (Alpha only) IPAMFromCluster is an allocator that has the similar +// functionality as the RangeAllocator but also synchronizes cluster-managed +// ranges into the cloud platform. +// - (Alpha only) IPAMFromCloud is the same as CloudAllocator (synchronizes +// from cloud into the cluster.) +package ipam diff --git a/cloud/nodeipam/node_ipam_controller.go b/cloud/nodeipam/node_ipam_controller.go new file mode 100644 index 00000000..56b01e0b --- /dev/null +++ b/cloud/nodeipam/node_ipam_controller.go @@ -0,0 +1,147 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeipam + +import ( + "context" + "fmt" + "net" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" + controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers" + "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" + "github.com/linode/linode-cloud-controller-manager/cloud/nodeipam/ipam" +) + +// Controller is the controller that manages node ipam state. +type Controller struct { + allocatorType ipam.CIDRAllocatorType + + cloud cloudprovider.Interface + linodeClient client.Client + clusterCIDRs []*net.IPNet + serviceCIDR *net.IPNet + secondaryServiceCIDR *net.IPNet + kubeClient clientset.Interface + eventBroadcaster record.EventBroadcaster + + nodeLister corelisters.NodeLister + nodeInformerSynced cache.InformerSynced + + cidrAllocator ipam.CIDRAllocator +} + +// NewNodeIpamController returns a new node IP Address Management controller to +// sync instances from cloudprovider. +// This method returns an error if it is unable to initialize the CIDR bitmap with +// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes +// currently, this should be handled as a fatal error. 
+func NewNodeIpamController( + ctx context.Context, + nodeInformer coreinformers.NodeInformer, + cloud cloudprovider.Interface, + linodeClient client.Client, + kubeClient clientset.Interface, + clusterCIDRs []*net.IPNet, + serviceCIDR *net.IPNet, + secondaryServiceCIDR *net.IPNet, + nodeCIDRMaskSizes []int, + allocatorType ipam.CIDRAllocatorType, + disableIPv6NodeCIDRAllocation bool, +) (*Controller, error) { + if kubeClient == nil { + return nil, fmt.Errorf("kubeClient is nil when starting Controller") + } + + if len(clusterCIDRs) == 0 { + return nil, fmt.Errorf("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set") + } + + for idx, cidr := range clusterCIDRs { + mask := cidr.Mask + if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSizes[idx] { + return nil, fmt.Errorf("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than or equal to --node-cidr-mask-size configured for CIDR family") + } + } + + ic := &Controller{ + cloud: cloud, + linodeClient: linodeClient, + kubeClient: kubeClient, + eventBroadcaster: record.NewBroadcaster(record.WithContext(ctx)), + clusterCIDRs: clusterCIDRs, + serviceCIDR: serviceCIDR, + secondaryServiceCIDR: secondaryServiceCIDR, + allocatorType: allocatorType, + } + + var err error + + allocatorParams := ipam.CIDRAllocatorParams{ + ClusterCIDRs: clusterCIDRs, + ServiceCIDR: ic.serviceCIDR, + SecondaryServiceCIDR: ic.secondaryServiceCIDR, + NodeCIDRMaskSizes: nodeCIDRMaskSizes, + DisableIPv6NodeCIDRAllocation: disableIPv6NodeCIDRAllocation, + } + + ic.cidrAllocator, err = ipam.New(ctx, ic.linodeClient, kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams) + if err != nil { + return nil, err + } + + ic.nodeLister = nodeInformer.Lister() + ic.nodeInformerSynced = nodeInformer.Informer().HasSynced + + return ic, nil +} + +// Run starts an asynchronous loop that monitors the status of cluster nodes. +func (nc *Controller) Run(ctx context.Context) { + defer utilruntime.HandleCrash() + + // Start event processing pipeline. + nc.eventBroadcaster.StartStructuredLogging(3) + nc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: nc.kubeClient.CoreV1().Events("")}) + defer nc.eventBroadcaster.Shutdown() + klog.FromContext(ctx).Info("Starting ipam controller") + defer klog.FromContext(ctx).Info("Shutting down ipam controller") + + if !cache.WaitForNamedCacheSync("node", ctx.Done(), nc.nodeInformerSynced) { + return + } + + go nc.cidrAllocator.Run(ctx) + + <-ctx.Done() +} + +// RunWithMetrics is a wrapper for Run that also tracks starting and stopping of the nodeipam controller with additional metric +func (nc *Controller) RunWithMetrics(ctx context.Context, controllerManagerMetrics *controllersmetrics.ControllerManagerMetrics) { + controllerManagerMetrics.ControllerStarted("nodeipam") + defer controllerManagerMetrics.ControllerStopped("nodeipam") + nc.Run(ctx) +} diff --git a/deploy/chart/templates/daemonset.yaml b/deploy/chart/templates/daemonset.yaml index acf99bec..fa9387b3 100644 --- a/deploy/chart/templates/daemonset.yaml +++ b/deploy/chart/templates/daemonset.yaml @@ -68,6 +68,9 @@ spec: {{- if not $clusterCIDR }} {{- fail "clusterCIDR is required if enableNodeIPAM is set" }} {{- end }} + {{- with .Values.disableIPv6NodeCIDRAllocation }} + - --disable-ipv6-node-cidr-allocation={{ . }} + {{- end }} {{- with .Values.nodeCIDRMaskSizeIPv4 }} - --node-cidr-mask-size-ipv4={{ . 
}} {{- end }} diff --git a/deploy/chart/values.yaml index 48629d34..2f55f503 100644 --- a/deploy/chart/values.yaml +++ b/deploy/chart/values.yaml @@ -88,6 +88,7 @@ tolerations: # clusterCIDR: 10.192.0.0/10 # nodeCIDRMaskSizeIPv4: 24 # nodeCIDRMaskSizeIPv6: 64 +# disableIPv6NodeCIDRAllocation: false # vpcs and subnets that node internal IPs will be assigned from (not required if already specified in routeController) # Use one of the two: either [vpcNames and subnetNames] or [vpcIDs and subnetIDs] diff --git a/docs/configuration/environment.md b/docs/configuration/environment.md index b7565773..2a21e955 100644 --- a/docs/configuration/environment.md +++ b/docs/configuration/environment.md @@ -56,6 +56,7 @@ The CCM supports the following flags: | `--node-cidr-mask-size-ipv4` | Int | `24` | ipv4 cidr mask size for pod cidrs allocated to nodes | | `--node-cidr-mask-size-ipv6` | Int | `64` | ipv6 cidr mask size for pod cidrs allocated to nodes | | `--nodebalancer-prefix` | String | `ccm` | Name prefix for NoadBalancers. | +| `--disable-ipv6-node-cidr-allocation` | Bool | `false` | disables allocating IPv6 CIDR ranges to nodes when using CCM for node IPAM (set to `true` if IPv6 ranges are not configured on Linode interfaces) | ## Configuration Methods diff --git a/docs/configuration/nodeipam.md b/docs/configuration/nodeipam.md index b093a431..3737d47b 100644 --- a/docs/configuration/nodeipam.md +++ b/docs/configuration/nodeipam.md @@ -21,7 +21,7 @@ Note: Make sure node IPAM allocation is disabled in kube-controller-manager to avoid both controllers competing to assign CIDRs to nodes. To make sure its disabled, check and make sure kube-controller-manager is not started with `--allocate-node-cidrs` flag. ## Allocated subnet size -By default, CCM allocates /24 subnet for ipv4 addresses and /64 for ipv6 addresses to nodes. If one wants different subnet range, it can be configured by using `--node-cidr-mask-size-ipv4` and `--node-cidr-mask-size-ipv6` flags. +By default, CCM allocates a /24 subnet for IPv4 addresses and a /112 subnet for IPv6 addresses to each node. For IPv6 CIDR allocation, Linodes need IPv6 ranges configured on their interfaces. A different subnet size can be configured using the `--node-cidr-mask-size-ipv4` and `--node-cidr-mask-size-ipv6` flags. ```yaml spec: @@ -31,7 +31,10 @@ spec: - name: ccm-linode args: - --allocate-node-cidrs=true - - --cluster-cidr=10.192.0.0/10,fd00::/56 + - --cluster-cidr=10.192.0.0/10 - --node-cidr-mask-size-ipv4=25 - --node-cidr-mask-size-ipv6=64 ``` + +## Disabling IPv6 IPAM allocation +To use only IPv4 node IPAM allocation, start CCM with `--disable-ipv6-node-cidr-allocation=true`, which disables IPv6 range allocation to nodes, as shown in the example below.
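+
+For example, a ccm-linode container might be configured as follows (the cluster CIDR below is illustrative; keep the IPv4 cluster CIDR you already use):
+
+```yaml
+spec:
+  template:
+    spec:
+      containers:
+        - name: ccm-linode
+          args:
+            - --allocate-node-cidrs=true
+            - --cluster-cidr=10.192.0.0/10
+            - --disable-ipv6-node-cidr-allocation=true
+```
+
+When deploying with the Helm chart, the equivalent setting is `disableIPv6NodeCIDRAllocation: true` in the chart values.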
diff --git a/go.mod b/go.mod index 695ba85a..f519d76f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/hexdigest/gowrap v1.4.2 - github.com/linode/linodego v1.52.1 + github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c github.com/prometheus/client_golang v1.22.0 github.com/spf13/pflag v1.0.7 github.com/stretchr/testify v1.10.0 @@ -25,7 +25,10 @@ require ( k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 ) -require sigs.k8s.io/randfill v1.0.0 // indirect +require ( + github.com/onsi/gomega v1.35.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect +) require ( cel.dev/expr v0.20.0 // indirect @@ -138,16 +141,16 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.38.0 // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/net v0.40.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.14.0 // indirect + golang.org/x/sync v0.15.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.25.0 // indirect + golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/tools v0.33.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect google.golang.org/grpc v1.72.1 // indirect @@ -159,7 +162,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.33.0 // indirect k8s.io/apiserver v0.33.3 // indirect - k8s.io/component-helpers v0.33.3 // indirect + k8s.io/component-helpers v0.33.3 k8s.io/controller-manager v0.33.3 // indirect k8s.io/kms v0.33.3 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect diff --git a/go.sum b/go.sum index 4d578ca5..3f232c98 100644 --- a/go.sum +++ b/go.sum @@ -202,8 +202,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/linode/linodego v1.52.1 h1:HJ1cz1n9n3chRP9UrtqmP91+xTi0Q5l+H/4z4tpkwgQ= -github.com/linode/linodego v1.52.1/go.mod h1:zEN2sX+cSdp67EuRY1HJiyuLujoa7HqvVwNEcJv3iXw= +github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c h1:WlZm+YNHBuphycMZG2s2+F04hx2wx1ShuOwPAIInjP8= +github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA= github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o= github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY= github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= @@ -391,31 +391,31 @@ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -434,8 +434,8 @@ golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= 
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -444,8 +444,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/main.go b/main.go index 4b7d888b..81e403e8 100644 --- a/main.go +++ b/main.go @@ -100,6 +100,7 @@ func main() { command.Flags().StringVar(&linode.Options.NodeBalancerBackendIPv4SubnetName, "nodebalancer-backend-ipv4-subnet-name", "", "ipv4 subnet name to use for NodeBalancer backends") command.Flags().BoolVar(&linode.Options.DisableNodeBalancerVPCBackends, "disable-nodebalancer-vpc-backends", false, "disables nodebalancer backends in VPCs (when enabled, nodebalancers will only have private IPs as backends for backward compatibility)") command.Flags().StringVar(&linode.Options.NodeBalancerPrefix, "nodebalancer-prefix", "ccm", fmt.Sprintf("Name prefix for NoadBalancers. (max. %v char.)", linode.NodeBalancerPrefixCharLimit)) + command.Flags().BoolVar(&linode.Options.DisableIPv6NodeCIDRAllocation, "disable-ipv6-node-cidr-allocation", false, "disables IPv6 node cidr allocation by ipam controller (when enabled, IPv6 cidr ranges will not be allocated to nodes)") // Set static flags command.Flags().VisitAll(func(fl *pflag.Flag) {