diff --git a/.cspell.json b/.cspell.json index b91b4b0d..0591734b 100644 --- a/.cspell.json +++ b/.cspell.json @@ -49,7 +49,8 @@ "cnpg", "pooler", "finalizer", - "superfences" + "superfences", + "tolerations" ], "ignorePaths": [ ".git/**", diff --git a/api/v1alpha1/codercontrolplane_types.go b/api/v1alpha1/codercontrolplane_types.go index d1558622..8ee8acfa 100644 --- a/api/v1alpha1/codercontrolplane_types.go +++ b/api/v1alpha1/codercontrolplane_types.go @@ -53,6 +53,63 @@ type CoderControlPlaneSpec struct { // control plane is ready and re-uploads when the Secret value changes. // +optional LicenseSecretRef *SecretKeySelector `json:"licenseSecretRef,omitempty"` + + // ServiceAccount configures the ServiceAccount for the control plane pod. + // +kubebuilder:default={} + ServiceAccount ServiceAccountSpec `json:"serviceAccount,omitempty"` + // RBAC configures namespace-scoped RBAC for workspace provisioning. + // +kubebuilder:default={} + RBAC RBACSpec `json:"rbac,omitempty"` + + // Resources sets resource requests/limits for the control plane container. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // SecurityContext sets the container security context. + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + // PodSecurityContext sets the pod-level security context. + // +optional + PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + + // TLS configures Coder built-in TLS. + // +kubebuilder:default={} + TLS TLSSpec `json:"tls,omitempty"` + + // ReadinessProbe configures the readiness probe for the control plane container. + // +kubebuilder:default={enabled:true} + ReadinessProbe ProbeSpec `json:"readinessProbe,omitempty"` + // LivenessProbe configures the liveness probe for the control plane container. 
+ // +kubebuilder:default={enabled:false} + LivenessProbe ProbeSpec `json:"livenessProbe,omitempty"` + + // EnvUseClusterAccessURL injects a default CODER_ACCESS_URL when not explicitly set. + // +kubebuilder:default=true + EnvUseClusterAccessURL *bool `json:"envUseClusterAccessURL,omitempty"` + + // Expose configures external exposure via Ingress or Gateway API. + // +optional + Expose *ExposeSpec `json:"expose,omitempty"` + + // +kubebuilder:validation:XValidation:rule="self.all(e, !(has(e.configMapRef) && has(e.secretRef)))",message="each envFrom entry may specify at most one of configMapRef or secretRef" + // EnvFrom injects environment variables from ConfigMaps/Secrets. + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` + // Volumes are additional volumes to add to the pod. + Volumes []corev1.Volume `json:"volumes,omitempty"` + // VolumeMounts are additional volume mounts for the control plane container. + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + // Certs configures additional CA certificate mounts. + // +kubebuilder:default={} + Certs CertsSpec `json:"certs,omitempty"` + + // NodeSelector constrains pod scheduling to nodes matching labels. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // Tolerations are applied to the control plane pod. + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // Affinity configures pod affinity/anti-affinity rules. + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + // TopologySpreadConstraints control pod topology spread. + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` } // OperatorAccessSpec configures the controller-managed coderd operator user. 
diff --git a/api/v1alpha1/types_shared.go b/api/v1alpha1/types_shared.go index 1ef264fd..1994daf9 100644 --- a/api/v1alpha1/types_shared.go +++ b/api/v1alpha1/types_shared.go @@ -1,6 +1,9 @@ package v1alpha1 -import corev1 "k8s.io/api/core/v1" +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" +) const ( // DefaultTokenSecretKey is the default key used for proxy session tokens. @@ -28,3 +31,131 @@ type SecretKeySelector struct { // Key is the key inside the Secret data map. Key string `json:"key,omitempty"` } + +// ServiceAccountSpec configures the ServiceAccount used by the Coder pod. +type ServiceAccountSpec struct { + // DisableCreate skips ServiceAccount creation (use an existing SA). + // +kubebuilder:default=false + DisableCreate bool `json:"disableCreate,omitempty"` + // Name overrides the ServiceAccount name. Defaults to the CoderControlPlane name. + Name string `json:"name,omitempty"` + // Annotations are applied to the managed ServiceAccount. + Annotations map[string]string `json:"annotations,omitempty"` + // Labels are applied to the managed ServiceAccount. + Labels map[string]string `json:"labels,omitempty"` +} + +// RBACSpec configures namespace-scoped RBAC for workspace provisioning. +type RBACSpec struct { + // WorkspacePerms enables Role/RoleBinding creation for workspace resources. + // When omitted, the default is true. + // +kubebuilder:default=true + WorkspacePerms *bool `json:"workspacePerms,omitempty"` + // EnableDeployments grants apps/deployments permissions (only when WorkspacePerms is true). + // When omitted, the default is true. + // +kubebuilder:default=true + EnableDeployments *bool `json:"enableDeployments,omitempty"` + // ExtraRules are appended to the managed Role rules. + ExtraRules []rbacv1.PolicyRule `json:"extraRules,omitempty"` + // WorkspaceNamespaces lists additional namespaces for Role/RoleBinding creation. 
+ WorkspaceNamespaces []string `json:"workspaceNamespaces,omitempty"` +} + +// TLSSpec configures Coder built-in TLS. +type TLSSpec struct { + // SecretNames lists TLS secrets to mount for built-in TLS. + // When non-empty, TLS is enabled on the Coder control plane. + SecretNames []string `json:"secretNames,omitempty"` +} + +// ProbeSpec configures a Kubernetes probe with an enable toggle. +type ProbeSpec struct { + // Enabled toggles the probe on or off. + // When omitted, readiness defaults to enabled while liveness defaults to disabled. + Enabled *bool `json:"enabled,omitempty"` + // InitialDelaySeconds is the delay before the probe starts. + // +kubebuilder:default=0 + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + // PeriodSeconds controls how often the probe is performed. + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + // TimeoutSeconds is the probe timeout. + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` + // SuccessThreshold is the minimum consecutive successes for the probe to be considered successful. + SuccessThreshold *int32 `json:"successThreshold,omitempty"` + // FailureThreshold is the minimum consecutive failures for the probe to be considered failed. + FailureThreshold *int32 `json:"failureThreshold,omitempty"` +} + +// ExposeSpec configures external exposure for the control plane. +// At most one of Ingress or Gateway may be set. +// +kubebuilder:validation:XValidation:rule="!(has(self.ingress) && has(self.gateway))",message="only one of ingress or gateway may be set" +type ExposeSpec struct { + // Ingress configures a networking.k8s.io/v1 Ingress. + // +optional + Ingress *IngressExposeSpec `json:"ingress,omitempty"` + // Gateway configures a gateway.networking.k8s.io/v1 HTTPRoute. + // +optional + Gateway *GatewayExposeSpec `json:"gateway,omitempty"` +} + +// IngressExposeSpec defines Ingress exposure configuration. +type IngressExposeSpec struct { + // ClassName is the Ingress class name. 
+ ClassName *string `json:"className,omitempty"` + // Host is the primary hostname for the Ingress rule. + Host string `json:"host"` + // WildcardHost is an optional wildcard hostname (e.g., for workspace apps). + WildcardHost string `json:"wildcardHost,omitempty"` + // Annotations are applied to the managed Ingress. + Annotations map[string]string `json:"annotations,omitempty"` + // TLS configures TLS termination at the Ingress. + // +optional + TLS *IngressTLSExposeSpec `json:"tls,omitempty"` +} + +// IngressTLSExposeSpec defines TLS configuration for the Ingress. +type IngressTLSExposeSpec struct { + // SecretName is the TLS Secret for the primary host. + SecretName string `json:"secretName,omitempty"` + // WildcardSecretName is the TLS Secret for the wildcard host. + WildcardSecretName string `json:"wildcardSecretName,omitempty"` +} + +// GatewayExposeSpec defines Gateway API (HTTPRoute) exposure configuration. +type GatewayExposeSpec struct { + // Host is the primary hostname for the HTTPRoute. + Host string `json:"host"` + // WildcardHost is an optional wildcard hostname. + WildcardHost string `json:"wildcardHost,omitempty"` + // ParentRefs are Gateways that the HTTPRoute attaches to. + // At least one parentRef is required when gateway exposure is configured. + // +kubebuilder:validation:MinItems=1 + ParentRefs []GatewayParentRef `json:"parentRefs"` +} + +// GatewayParentRef identifies a Gateway for HTTPRoute attachment. +type GatewayParentRef struct { + // Name is the Gateway name. + Name string `json:"name"` + // Namespace is the Gateway namespace. + // +optional + Namespace *string `json:"namespace,omitempty"` + // SectionName is the listener name within the Gateway. + // +optional + SectionName *string `json:"sectionName,omitempty"` +} + +// CertsSpec configures additional CA certificate mounts. +type CertsSpec struct { + // Secrets lists Secret key selectors for CA certificates. + // Each is mounted at `/etc/ssl/certs/{name}.crt`. 
+ Secrets []CertSecretSelector `json:"secrets,omitempty"` +} + +// CertSecretSelector identifies a key within a Secret for CA cert mounting. +type CertSecretSelector struct { + // Name is the Secret name. + Name string `json:"name"` + // Key is the key within the Secret data map. + Key string `json:"key"` +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 8e91114a..4cee580b 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -7,10 +7,48 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertSecretSelector) DeepCopyInto(out *CertSecretSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertSecretSelector. +func (in *CertSecretSelector) DeepCopy() *CertSecretSelector { + if in == nil { + return nil + } + out := new(CertSecretSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertsSpec) DeepCopyInto(out *CertsSpec) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]CertSecretSelector, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertsSpec. +func (in *CertsSpec) DeepCopy() *CertsSpec { + if in == nil { + return nil + } + out := new(CertsSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CoderControlPlane) DeepCopyInto(out *CoderControlPlane) { *out = *in @@ -104,6 +142,84 @@ func (in *CoderControlPlaneSpec) DeepCopyInto(out *CoderControlPlaneSpec) { *out = new(SecretKeySelector) **out = **in } + in.ServiceAccount.DeepCopyInto(&out.ServiceAccount) + in.RBAC.DeepCopyInto(&out.RBAC) + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.PodSecurityContext != nil { + in, out := &in.PodSecurityContext, &out.PodSecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + in.TLS.DeepCopyInto(&out.TLS) + in.ReadinessProbe.DeepCopyInto(&out.ReadinessProbe) + in.LivenessProbe.DeepCopyInto(&out.LivenessProbe) + if in.EnvUseClusterAccessURL != nil { + in, out := &in.EnvUseClusterAccessURL, &out.EnvUseClusterAccessURL + *out = new(bool) + **out = **in + } + if in.Expose != nil { + in, out := &in.Expose, &out.Expose + *out = new(ExposeSpec) + (*in).DeepCopyInto(*out) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Certs.DeepCopyInto(&out.Certs) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = 
make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -372,6 +488,130 @@ func (in *CoderWorkspaceProxyList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExposeSpec) DeepCopyInto(out *ExposeSpec) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(IngressExposeSpec) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(GatewayExposeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposeSpec. +func (in *ExposeSpec) DeepCopy() *ExposeSpec { + if in == nil { + return nil + } + out := new(ExposeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayExposeSpec) DeepCopyInto(out *GatewayExposeSpec) { + *out = *in + if in.ParentRefs != nil { + in, out := &in.ParentRefs, &out.ParentRefs + *out = make([]GatewayParentRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayExposeSpec. 
+func (in *GatewayExposeSpec) DeepCopy() *GatewayExposeSpec { + if in == nil { + return nil + } + out := new(GatewayExposeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayParentRef) DeepCopyInto(out *GatewayParentRef) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.SectionName != nil { + in, out := &in.SectionName, &out.SectionName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayParentRef. +func (in *GatewayParentRef) DeepCopy() *GatewayParentRef { + if in == nil { + return nil + } + out := new(GatewayParentRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressExposeSpec) DeepCopyInto(out *IngressExposeSpec) { + *out = *in + if in.ClassName != nil { + in, out := &in.ClassName, &out.ClassName + *out = new(string) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(IngressTLSExposeSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressExposeSpec. +func (in *IngressExposeSpec) DeepCopy() *IngressExposeSpec { + if in == nil { + return nil + } + out := new(IngressExposeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressTLSExposeSpec) DeepCopyInto(out *IngressTLSExposeSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLSExposeSpec. +func (in *IngressTLSExposeSpec) DeepCopy() *IngressTLSExposeSpec { + if in == nil { + return nil + } + out := new(IngressTLSExposeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OperatorAccessSpec) DeepCopyInto(out *OperatorAccessSpec) { *out = *in @@ -388,6 +628,47 @@ func (in *OperatorAccessSpec) DeepCopy() *OperatorAccessSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.PeriodSeconds != nil { + in, out := &in.PeriodSeconds, &out.PeriodSeconds + *out = new(int32) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.SuccessThreshold != nil { + in, out := &in.SuccessThreshold, &out.SuccessThreshold + *out = new(int32) + **out = **in + } + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. +func (in *ProbeSpec) DeepCopy() *ProbeSpec { + if in == nil { + return nil + } + out := new(ProbeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ProxyBootstrapSpec) DeepCopyInto(out *ProxyBootstrapSpec) { *out = *in @@ -405,6 +686,44 @@ func (in *ProxyBootstrapSpec) DeepCopy() *ProxyBootstrapSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBACSpec) DeepCopyInto(out *RBACSpec) { + *out = *in + if in.WorkspacePerms != nil { + in, out := &in.WorkspacePerms, &out.WorkspacePerms + *out = new(bool) + **out = **in + } + if in.EnableDeployments != nil { + in, out := &in.EnableDeployments, &out.EnableDeployments + *out = new(bool) + **out = **in + } + if in.ExtraRules != nil { + in, out := &in.ExtraRules, &out.ExtraRules + *out = make([]rbacv1.PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkspaceNamespaces != nil { + in, out := &in.WorkspaceNamespaces, &out.WorkspaceNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBACSpec. +func (in *RBACSpec) DeepCopy() *RBACSpec { + if in == nil { + return nil + } + out := new(RBACSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { *out = *in @@ -421,6 +740,36 @@ func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountSpec) DeepCopyInto(out *ServiceAccountSpec) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSpec. +func (in *ServiceAccountSpec) DeepCopy() *ServiceAccountSpec { + if in == nil { + return nil + } + out := new(ServiceAccountSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = *in @@ -444,6 +793,27 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSSpec) DeepCopyInto(out *TLSSpec) { + *out = *in + if in.SecretNames != nil { + in, out := &in.SecretNames, &out.SecretNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSpec. +func (in *TLSSpec) DeepCopy() *TLSSpec { + if in == nil { + return nil + } + out := new(TLSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WorkspaceProxySpec) DeepCopyInto(out *WorkspaceProxySpec) { *out = *in diff --git a/config/crd/bases/coder.com_codercontrolplanes.yaml b/config/crd/bases/coder.com_codercontrolplanes.yaml index 10764eed..ec501caf 100644 --- a/config/crd/bases/coder.com_codercontrolplanes.yaml +++ b/config/crd/bases/coder.com_codercontrolplanes.yaml @@ -39,6 +39,1077 @@ spec: spec: description: CoderControlPlaneSpec defines the desired state of a CoderControlPlane. properties: + affinity: + description: Affinity configures pod affinity/anti-affinity rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
+ properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. 
The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + certs: + default: {} + description: Certs configures additional CA certificate mounts. + properties: + secrets: + description: |- + Secrets lists Secret key selectors for CA certificates. + Each is mounted at `/etc/ssl/certs/{name}.crt`. + items: + description: CertSecretSelector identifies a key within a Secret + for CA cert mounting. 
+ properties: + key: + description: Key is the key within the Secret data map. + type: string + name: + description: Name is the Secret name. + type: string + required: + - key + - name + type: object + type: array + type: object + envFrom: + description: EnvFrom injects environment variables from ConfigMaps/Secrets. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-validations: + - message: each envFrom entry may specify at most one of configMapRef + or secretRef + rule: self.all(e, !(has(e.configMapRef) && has(e.secretRef))) + envUseClusterAccessURL: + default: true + description: EnvUseClusterAccessURL injects a default CODER_ACCESS_URL + when not explicitly set. + type: boolean + expose: + description: Expose configures external exposure via Ingress or Gateway + API. + properties: + gateway: + description: Gateway configures a gateway.networking.k8s.io/v1 + HTTPRoute. + properties: + host: + description: Host is the primary hostname for the HTTPRoute. + type: string + parentRefs: + description: |- + ParentRefs are Gateways that the HTTPRoute attaches to. + At least one parentRef is required when gateway exposure is configured. + items: + description: GatewayParentRef identifies a Gateway for HTTPRoute + attachment. + properties: + name: + description: Name is the Gateway name. + type: string + namespace: + description: Namespace is the Gateway namespace. + type: string + sectionName: + description: SectionName is the listener name within + the Gateway. + type: string + required: + - name + type: object + minItems: 1 + type: array + wildcardHost: + description: WildcardHost is an optional wildcard hostname. + type: string + required: + - host + - parentRefs + type: object + ingress: + description: Ingress configures a networking.k8s.io/v1 Ingress. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are applied to the managed Ingress. + type: object + className: + description: ClassName is the Ingress class name. + type: string + host: + description: Host is the primary hostname for the Ingress + rule. 
+ type: string + tls: + description: TLS configures TLS termination at the Ingress. + properties: + secretName: + description: SecretName is the TLS Secret for the primary + host. + type: string + wildcardSecretName: + description: WildcardSecretName is the TLS Secret for + the wildcard host. + type: string + type: object + wildcardHost: + description: WildcardHost is an optional wildcard hostname + (e.g., for workspace apps). + type: string + required: + - host + type: object + type: object + x-kubernetes-validations: + - message: only one of ingress or gateway may be set + rule: '!(has(self.ingress) && has(self.gateway))' extraArgs: description: ExtraArgs are appended to the default Coder server arguments. items: @@ -241,6 +1312,48 @@ spec: required: - name type: object + livenessProbe: + default: + enabled: false + description: LivenessProbe configures the liveness probe for the control + plane container. + properties: + enabled: + description: |- + Enabled toggles the probe on or off. + When omitted, readiness defaults to enabled while liveness defaults to disabled. + type: boolean + failureThreshold: + description: FailureThreshold is the minimum consecutive failures + for the probe to be considered failed. + format: int32 + type: integer + initialDelaySeconds: + default: 0 + description: InitialDelaySeconds is the delay before the probe + starts. + format: int32 + type: integer + periodSeconds: + description: PeriodSeconds controls how often the probe is performed. + format: int32 + type: integer + successThreshold: + description: SuccessThreshold is the minimum consecutive successes + for the probe to be considered successful. + format: int32 + type: integer + timeoutSeconds: + description: TimeoutSeconds is the probe timeout. + format: int32 + type: integer + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector constrains pod scheduling to nodes matching + labels. 
+ type: object operatorAccess: default: {} description: OperatorAccess configures bootstrap API access to the @@ -257,11 +1370,603 @@ spec: API token. type: string type: object + podSecurityContext: + description: PodSecurityContext sets the pod-level security context. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). 
+ It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. 
+ This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. 
+ type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. 
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. 
+ type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + rbac: + default: {} + description: RBAC configures namespace-scoped RBAC for workspace provisioning. + properties: + enableDeployments: + default: true + description: |- + EnableDeployments grants apps/deployments permissions (only when WorkspacePerms is true). + When omitted, the default is true. + type: boolean + extraRules: + description: ExtraRules are appended to the managed Role rules. + items: + description: |- + PolicyRule holds information that describes a policy rule, but does not contain information + about who the rule applies to or which namespace the rule applies to. + properties: + apiGroups: + description: |- + APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. + items: + type: string + type: array + x-kubernetes-list-type: atomic + nonResourceURLs: + description: |- + NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + items: + type: string + type: array + x-kubernetes-list-type: atomic + resourceNames: + description: ResourceNames is an optional white list of + names that the rule applies to. 
An empty set means that + everything is allowed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + resources: + description: Resources is a list of resources this rule + applies to. '*' represents all resources. + items: + type: string + type: array + x-kubernetes-list-type: atomic + verbs: + description: Verbs is a list of Verbs that apply to ALL + the ResourceKinds contained in this rule. '*' represents + all verbs. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - verbs + type: object + type: array + workspaceNamespaces: + description: WorkspaceNamespaces lists additional namespaces for + Role/RoleBinding creation. + items: + type: string + type: array + workspacePerms: + default: true + description: |- + WorkspacePerms enables Role/RoleBinding creation for workspace resources. + When omitted, the default is true. + type: boolean + type: object + readinessProbe: + default: + enabled: true + description: ReadinessProbe configures the readiness probe for the + control plane container. + properties: + enabled: + description: |- + Enabled toggles the probe on or off. + When omitted, readiness defaults to enabled while liveness defaults to disabled. + type: boolean + failureThreshold: + description: FailureThreshold is the minimum consecutive failures + for the probe to be considered failed. + format: int32 + type: integer + initialDelaySeconds: + default: 0 + description: InitialDelaySeconds is the delay before the probe + starts. + format: int32 + type: integer + periodSeconds: + description: PeriodSeconds controls how often the probe is performed. + format: int32 + type: integer + successThreshold: + description: SuccessThreshold is the minimum consecutive successes + for the probe to be considered successful. + format: int32 + type: integer + timeoutSeconds: + description: TimeoutSeconds is the probe timeout. 
+ format: int32 + type: integer + type: object replicas: default: 1 description: Replicas is the desired number of control plane pods. format: int32 type: integer + resources: + description: Resources sets resource requests/limits for the control + plane container. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + securityContext: + description: SecurityContext sets the container security context. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". 
Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object service: default: {} description: Service controls the service created in front of the @@ -283,6 +1988,2224 @@ spec: description: Type controls the Kubernetes service type. type: string type: object + serviceAccount: + default: {} + description: ServiceAccount configures the ServiceAccount for the + control plane pod. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are applied to the managed ServiceAccount. + type: object + disableCreate: + default: false + description: DisableCreate skips ServiceAccount creation (use + an existing SA). + type: boolean + labels: + additionalProperties: + type: string + description: Labels are applied to the managed ServiceAccount. + type: object + name: + description: Name overrides the ServiceAccount name. Defaults + to the CoderControlPlane name. + type: string + type: object + tls: + default: {} + description: TLS configures Coder built-in TLS. + properties: + secretNames: + description: |- + SecretNames lists TLS secrets to mount for built-in TLS. + When non-empty, TLS is enabled on the Coder control plane. + items: + type: string + type: array + type: object + tolerations: + description: Tolerations are applied to the control plane pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. 
+ Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints control pod topology spread. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. 
+ The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. 
+ - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumeMounts: + description: VolumeMounts are additional volume mounts for the control + plane container. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes are additional volumes to add to the pod. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. 
All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. 
+ properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. 
+ The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. 
Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + Users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. 
+ Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. 
+ type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. 
All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. 
Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + properties: + endpoints: + description: endpoints is the endpoint name that details + Glusterfs topology. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. 
+ properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. 
+ properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. 
Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' 
+ properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. 
If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + userAnnotations: + additionalProperties: + type: string + description: |- + userAnnotations allow pod authors to pass additional information to + the signer implementation. Kubernetes does not restrict or validate this + metadata in any way. + + These values are copied verbatim into the `spec.unverifiedUserAnnotations` field of + the PodCertificateRequest objects that Kubelet creates. + + Entries are subject to the same validation as object metadata annotations, + with the addition that all keys must be domain-prefixed. No restrictions + are placed on values, except an overall size limitation on the entire field. + + Signers should document the keys and values they support. Signers should + deny requests that contain keys they do not recognize. 
+ type: object + required: + - keyType + - signerName + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. 
+ type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. 
+ type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array type: object status: description: CoderControlPlaneStatus defines the observed state of a CoderControlPlane. 
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 94feb70d..0ebf947a 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -7,32 +7,31 @@ rules: - apiGroups: - "" resources: - - events + - configmaps + - namespaces verbs: - - create - get - list - - patch - watch - apiGroups: - "" resources: - - namespaces + - events verbs: + - create - get - list + - patch - watch - apiGroups: - "" resources: - persistentvolumeclaims - pods - - secrets - - serviceaccounts - - services verbs: - create - delete + - deletecollection - get - list - patch @@ -44,6 +43,20 @@ rules: - pods/log verbs: - get +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - aggregation.coder.com resources: @@ -62,6 +75,7 @@ rules: verbs: - create - delete + - deletecollection - get - list - patch @@ -111,6 +125,30 @@ rules: - patch - update - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: diff --git a/docs/design/helm-parity.md b/docs/design/helm-parity.md new file mode 100644 index 00000000..8260d03c --- /dev/null +++ b/docs/design/helm-parity.md @@ -0,0 +1,64 @@ +# Helm Chart Parity Tracking + +This document tracks the mapping between upstream `coder/coder` Helm chart +values and the `CoderControlPlane` CRD fields managed by this operator. 
+ +## Legend + +| Status | Meaning | +|--------|---------| +| ✅ | Implemented in CRD | +| 🚧 | Planned / in progress | +| ❌ | Not planned / out of scope | + +## Phase 1 — Production Readiness + +| Helm Chart Value | CRD Field | Status | Notes | +|------------------|-----------|--------|-------| +| `coder.image.repo` / `coder.image.tag` | `spec.image` | ✅ | Combined as full image reference | +| `coder.replicaCount` | `spec.replicas` | ✅ | | +| `coder.env` | `spec.extraEnv` | ✅ | | +| `coder.service.type` | `spec.service.type` | ✅ | | +| `coder.service.httpNodePort` | `spec.service.port` | ✅ | Port only; nodePort inferred by Kubernetes | +| `coder.service.annotations` | `spec.service.annotations` | ✅ | | +| `coder.serviceAccount.create` | `spec.serviceAccount.disableCreate` | ✅ | Inverted sense | +| `coder.serviceAccount.name` | `spec.serviceAccount.name` | ✅ | | +| `coder.serviceAccount.annotations` | `spec.serviceAccount.annotations` | ✅ | | +| `coder.serviceAccount.labels` | `spec.serviceAccount.labels` | ✅ | | +| `coder.workspaceProxy` | — | ❌ | Workspace proxy mode not in scope | +| `coder.resources` | `spec.resources` | ✅ | | +| `coder.securityContext` | `spec.securityContext` | ✅ | Container-level | +| `coder.podSecurityContext` | `spec.podSecurityContext` | ✅ | Pod-level | +| `coder.tls.secretNames` | `spec.tls.secretNames` | ✅ | Enables Coder built-in TLS | +| `coder.readinessProbe` | `spec.readinessProbe` | ✅ | | +| `coder.livenessProbe` | `spec.livenessProbe` | ✅ | | +| `coder.env` (`CODER_ACCESS_URL`) | `spec.envUseClusterAccessURL` | ✅ | Auto-injects default in-cluster URL | +| `coder.rbac.createWorkspacePerms` | `spec.rbac.workspacePerms` | ✅ | | +| `coder.rbac.enableDeployments` | `spec.rbac.enableDeployments` | ✅ | | +| `coder.rbac.extraRules` | `spec.rbac.extraRules` | ✅ | | + +## Phase 2 — Operability & HA + +| Helm Chart Value | CRD Field | Status | Notes | +|------------------|-----------|--------|-------| +| `coder.envFrom` | `spec.envFrom` | ✅ | 
| +| `coder.volumes` | `spec.volumes` | ✅ | | +| `coder.volumeMounts` | `spec.volumeMounts` | ✅ | | +| `coder.certs.secrets` | `spec.certs.secrets` | ✅ | CA cert Secret selectors | +| `coder.nodeSelector` | `spec.nodeSelector` | ✅ | | +| `coder.tolerations` | `spec.tolerations` | ✅ | | +| `coder.affinity` | `spec.affinity` | ✅ | | +| `coder.topologySpreadConstraints` | `spec.topologySpreadConstraints` | ✅ | | +| `coder.ingress.*` | `spec.expose.ingress` | ✅ | Part of unified expose API | +| Gateway API | `spec.expose.gateway` | ✅ | HTTPRoute; Gateway CRDs optional | +| `coder.imagePullSecrets` | `spec.imagePullSecrets` | ✅ | | + +## Not Planned + +| Helm Chart Value | Reason | +|------------------|--------| +| `coder.workspaceProxy` | Workspace proxy mode is a separate concern | +| `coder.podDisruptionBudget` | Future enhancement | +| `coder.initContainers` | Future enhancement | +| `coder.command` | Not safe to override in operator mode | +| `provisionerDaemon.*` | Separate provisioner deployment (future) | diff --git a/docs/reference/api/codercontrolplane.md b/docs/reference/api/codercontrolplane.md index c373aeb8..a927920e 100644 --- a/docs/reference/api/codercontrolplane.md +++ b/docs/reference/api/codercontrolplane.md @@ -21,6 +21,24 @@ | `imagePullSecrets` | [LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#localobjectreference-v1-core) array | ImagePullSecrets are used by the pod to pull private images. | | `operatorAccess` | [OperatorAccessSpec](#operatoraccessspec) | OperatorAccess configures bootstrap API access to the coderd instance. | | `licenseSecretRef` | [SecretKeySelector](#secretkeyselector) | LicenseSecretRef references a Secret key containing a Coder Enterprise license JWT. When set, the controller uploads the license after the control plane is ready and re-uploads when the Secret value changes. 
| +| `serviceAccount` | [ServiceAccountSpec](#serviceaccountspec) | ServiceAccount configures the ServiceAccount for the control plane pod. | +| `rbac` | [RBACSpec](#rbacspec) | RBAC configures namespace-scoped RBAC for workspace provisioning. | +| `resources` | [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#resourcerequirements-v1-core) | Resources sets resource requests/limits for the control plane container. | +| `securityContext` | [SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#securitycontext-v1-core) | SecurityContext sets the container security context. | +| `podSecurityContext` | [PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#podsecuritycontext-v1-core) | PodSecurityContext sets the pod-level security context. | +| `tls` | [TLSSpec](#tlsspec) | TLS configures Coder built-in TLS. | +| `readinessProbe` | [ProbeSpec](#probespec) | ReadinessProbe configures the readiness probe for the control plane container. | +| `livenessProbe` | [ProbeSpec](#probespec) | LivenessProbe configures the liveness probe for the control plane container. | +| `envUseClusterAccessURL` | boolean | EnvUseClusterAccessURL injects a default CODER_ACCESS_URL when not explicitly set. | +| `expose` | [ExposeSpec](#exposespec) | Expose configures external exposure via Ingress or Gateway API. | +| `envFrom` | [EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#envfromsource-v1-core) array | EnvFrom injects environment variables from ConfigMaps/Secrets. | +| `volumes` | [Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#volume-v1-core) array | Volumes are additional volumes to add to the pod. | +| `volumeMounts` | [VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#volumemount-v1-core) array | VolumeMounts are additional volume mounts for the control plane container. 
| +| `certs` | [CertsSpec](#certsspec) | Certs configures additional CA certificate mounts. | +| `nodeSelector` | object (keys:string, values:string) | NodeSelector constrains pod scheduling to nodes matching labels. | +| `tolerations` | [Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#toleration-v1-core) array | Tolerations are applied to the control plane pod. | +| `affinity` | [Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#affinity-v1-core) | Affinity configures pod affinity/anti-affinity rules. | +| `topologySpreadConstraints` | [TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#topologyspreadconstraint-v1-core) array | TopologySpreadConstraints control pod topology spread. | ## Status @@ -41,6 +59,75 @@ ## Referenced types +### CertSecretSelector + +CertSecretSelector identifies a key within a Secret for CA cert mounting. + +| Field | Type | Description | +| --- | --- | --- | +| `name` | string | Name is the Secret name. | +| `key` | string | Key is the key within the Secret data map. | + +### CertsSpec + +CertsSpec configures additional CA certificate mounts. + +| Field | Type | Description | +| --- | --- | --- | +| `secrets` | [CertSecretSelector](#certsecretselector) array | Secrets lists Secret key selectors for CA certificates. Each is mounted at `/etc/ssl/certs/\{name\}.crt`. | + +### ExposeSpec + +ExposeSpec configures external exposure for the control plane. +At most one of Ingress or Gateway may be set. ++kubebuilder:validation:XValidation:rule="!(has(self.ingress) && has(self.gateway))",message="only one of ingress or gateway may be set" + +| Field | Type | Description | +| --- | --- | --- | +| `ingress` | [IngressExposeSpec](#ingressexposespec) | Ingress configures a networking.k8s.io/v1 Ingress. | +| `gateway` | [GatewayExposeSpec](#gatewayexposespec) | Gateway configures a gateway.networking.k8s.io/v1 HTTPRoute. 
| + +### GatewayExposeSpec + +GatewayExposeSpec defines Gateway API (HTTPRoute) exposure configuration. + +| Field | Type | Description | +| --- | --- | --- | +| `host` | string | Host is the primary hostname for the HTTPRoute. | +| `wildcardHost` | string | WildcardHost is an optional wildcard hostname. | +| `parentRefs` | [GatewayParentRef](#gatewayparentref) array | ParentRefs are Gateways that the HTTPRoute attaches to. At least one parentRef is required when gateway exposure is configured. | + +### GatewayParentRef + +GatewayParentRef identifies a Gateway for HTTPRoute attachment. + +| Field | Type | Description | +| --- | --- | --- | +| `name` | string | Name is the Gateway name. | +| `namespace` | string | Namespace is the Gateway namespace. | +| `sectionName` | string | SectionName is the listener name within the Gateway. | + +### IngressExposeSpec + +IngressExposeSpec defines Ingress exposure configuration. + +| Field | Type | Description | +| --- | --- | --- | +| `className` | string | ClassName is the Ingress class name. | +| `host` | string | Host is the primary hostname for the Ingress rule. | +| `wildcardHost` | string | WildcardHost is an optional wildcard hostname (e.g., for workspace apps). | +| `annotations` | object (keys:string, values:string) | Annotations are applied to the managed Ingress. | +| `tls` | [IngressTLSExposeSpec](#ingresstlsexposespec) | TLS configures TLS termination at the Ingress. | + +### IngressTLSExposeSpec + +IngressTLSExposeSpec defines TLS configuration for the Ingress. + +| Field | Type | Description | +| --- | --- | --- | +| `secretName` | string | SecretName is the TLS Secret for the primary host. | +| `wildcardSecretName` | string | WildcardSecretName is the TLS Secret for the wildcard host. | + ### OperatorAccessSpec OperatorAccessSpec configures the controller-managed coderd operator user. @@ -50,6 +137,30 @@ OperatorAccessSpec configures the controller-managed coderd operator user. 
| `disabled` | boolean | Disabled turns off creation and management of the `coder-k8s-operator` user and API token. | | `generatedTokenSecretName` | string | GeneratedTokenSecretName stores the generated operator API token. | +### ProbeSpec + +ProbeSpec configures a Kubernetes probe with an enable toggle. + +| Field | Type | Description | +| --- | --- | --- | +| `enabled` | boolean | Enabled toggles the probe on or off. When omitted, readiness defaults to enabled while liveness defaults to disabled. | +| `initialDelaySeconds` | integer | InitialDelaySeconds is the delay before the probe starts. | +| `periodSeconds` | integer | PeriodSeconds controls how often the probe is performed. | +| `timeoutSeconds` | integer | TimeoutSeconds is the probe timeout. | +| `successThreshold` | integer | SuccessThreshold is the minimum consecutive successes for the probe to be considered successful. | +| `failureThreshold` | integer | FailureThreshold is the minimum consecutive failures for the probe to be considered failed. | + +### RBACSpec + +RBACSpec configures namespace-scoped RBAC for workspace provisioning. + +| Field | Type | Description | +| --- | --- | --- | +| `workspacePerms` | boolean | WorkspacePerms enables Role/RoleBinding creation for workspace resources. When omitted, the default is true. | +| `enableDeployments` | boolean | EnableDeployments grants apps/deployments permissions (only when WorkspacePerms is true). When omitted, the default is true. | +| `extraRules` | [PolicyRule](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#policyrule-v1-rbac) array | ExtraRules are appended to the managed Role rules. | +| `workspaceNamespaces` | string array | WorkspaceNamespaces lists additional namespaces for Role/RoleBinding creation. | + ### SecretKeySelector SecretKeySelector identifies a key in a Secret. @@ -59,6 +170,17 @@ SecretKeySelector identifies a key in a Secret. | `name` | string | Name is the Kubernetes Secret name. 
| | `key` | string | Key is the key inside the Secret data map. | +### ServiceAccountSpec + +ServiceAccountSpec configures the ServiceAccount used by the Coder pod. + +| Field | Type | Description | +| --- | --- | --- | +| `disableCreate` | boolean | DisableCreate skips ServiceAccount creation (use an existing SA). | +| `name` | string | Name overrides the ServiceAccount name. Defaults to the CoderControlPlane name. | +| `annotations` | object (keys:string, values:string) | Annotations are applied to the managed ServiceAccount. | +| `labels` | object (keys:string, values:string) | Labels are applied to the managed ServiceAccount. | + ### ServiceSpec ServiceSpec defines the Service configuration reconciled by the operator. @@ -69,6 +191,14 @@ ServiceSpec defines the Service configuration reconciled by the operator. | `port` | integer | Port controls the exposed service port. | | `annotations` | object (keys:string, values:string) | Annotations are applied to the reconciled service object. | +### TLSSpec + +TLSSpec configures Coder built-in TLS. + +| Field | Type | Description | +| --- | --- | --- | +| `secretNames` | string array | SecretNames lists TLS secrets to mount for built-in TLS. When non-empty, TLS is enabled on the Coder control plane. 
| + ## Source - Go type: `api/v1alpha1/codercontrolplane_types.go` diff --git a/go.mod b/go.mod index 5a792883..7f0a8872 100644 --- a/go.mod +++ b/go.mod @@ -10,12 +10,14 @@ require ( github.com/stretchr/testify v1.11.1 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da k8s.io/api v0.35.0 + k8s.io/apiextensions-apiserver v0.35.0 k8s.io/apimachinery v0.36.0-alpha.1 k8s.io/apiserver v0.35.0 k8s.io/client-go v0.35.0 k8s.io/code-generator v0.35.0 k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 sigs.k8s.io/controller-runtime v0.23.1 + sigs.k8s.io/gateway-api v1.4.1 ) tool ( @@ -130,7 +132,7 @@ require ( github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect github.com/ebitengine/purego v0.9.1 // indirect github.com/elastic/crd-ref-docs v0.3.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fatih/color v1.18.0 // indirect @@ -148,7 +150,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.21.2 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect @@ -273,7 +275,7 @@ require ( github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.4 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/quasilyte/go-ruleguard v0.4.5 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect @@ -397,7 +399,6 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect 
gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.6.1 // indirect - k8s.io/apiextensions-apiserver v0.35.0 // indirect k8s.io/component-base v0.35.0 // indirect k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 13deedb7..8d40fd56 100644 --- a/go.sum +++ b/go.sum @@ -236,8 +236,8 @@ github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/elastic/crd-ref-docs v0.3.0 h1:9bGSUkBR56Z7TuDGQAu3KGbBkagwwZ6RkZmS+qvDuDM= github.com/elastic/crd-ref-docs v0.3.0/go.mod h1:8td3UC8CaO5M+G115O3FRKLmplmX+p0EqLMLGM6uNdk= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= @@ -279,8 +279,8 @@ github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA= 
+github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= @@ -655,8 +655,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/quasilyte/go-ruleguard v0.4.5 h1:AGY0tiOT5hJX9BTdx/xBdoCubQUAE2grkqY2lSwvZcA= @@ -1171,6 +1171,8 @@ sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20260209102324-2d3d1ad sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20260209102324-2d3d1ad92e9a/go.mod h1:IlbgWQkCYbpbkygXxnd/sLJWL9WQk9NvvVhkJCm5P7Q= sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= +sigs.k8s.io/gateway-api v1.4.1 h1:NPxFutNkKNa8UfLd2CMlEuhIPMQgDQ6DXNKG9sHbJU8= +sigs.k8s.io/gateway-api v1.4.1/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk= sigs.k8s.io/json 
v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= diff --git a/internal/app/sharedscheme/sharedscheme.go b/internal/app/sharedscheme/sharedscheme.go index 136de400..7fb29e9b 100644 --- a/internal/app/sharedscheme/sharedscheme.go +++ b/internal/app/sharedscheme/sharedscheme.go @@ -5,6 +5,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" aggregationv1alpha1 "github.com/coder/coder-k8s/api/aggregation/v1alpha1" coderv1alpha1 "github.com/coder/coder-k8s/api/v1alpha1" @@ -16,5 +17,6 @@ func New() *runtime.Scheme { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(coderv1alpha1.AddToScheme(scheme)) utilruntime.Must(aggregationv1alpha1.AddToScheme(scheme)) + utilruntime.Must(gatewayv1.Install(scheme)) return scheme } diff --git a/internal/controller/codercontrolplane_controller.go b/internal/controller/codercontrolplane_controller.go index faab84a2..97e5917d 100644 --- a/internal/controller/codercontrolplane_controller.go +++ b/internal/controller/codercontrolplane_controller.go @@ -17,11 +17,14 @@ import ( "github.com/coder/coder/v2/codersdk" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/util/retry" @@ -30,15 +33,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 
"sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" coderv1alpha1 "github.com/coder/coder-k8s/api/v1alpha1" "github.com/coder/coder-k8s/internal/coderbootstrap" ) const ( - defaultCoderImage = "ghcr.io/coder/coder:latest" - defaultControlPlanePort = int32(80) - controlPlaneTargetPort = int32(3000) + defaultCoderImage = "ghcr.io/coder/coder:latest" + defaultControlPlanePort = int32(80) + controlPlaneTargetPort = int32(8080) + controlPlaneTLSTargetPort = int32(8443) postgresConnectionURLEnvVar = "CODER_PG_CONNECTION_URL" @@ -51,8 +56,15 @@ const ( operatorAccessRetryInterval = 30 * time.Second operatorTokenSecretSuffix = "-operator-token" - // #nosec G101 -- this is a field index key, not a credential. - licenseSecretNameFieldIndex = ".spec.licenseSecretRef.name" + workspaceRBACFinalizer = "coder.com/workspace-rbac-cleanup" + workspaceRBACOwnerUIDAnnotation = "coder.com/workspace-rbac-owner-uid" + workspaceRoleNameSuffix = "-workspace-perms" + kubernetesObjectNameMaxLength = 253 + + // #nosec G101 -- these are field index keys, not credentials. + licenseSecretNameFieldIndex = ".spec.licenseSecretRef.name" + envFromConfigMapNameFieldIndex = ".spec.envFrom.configMapRef.name" + envFromSecretNameFieldIndex = ".spec.envFrom.secretRef.name" // #nosec G101 -- this is a field index key, not a credential. 
licenseConditionReasonApplied = "Applied" licenseConditionReasonPending = "Pending" @@ -61,6 +73,8 @@ const ( licenseConditionReasonNotSupported = "NotSupported" licenseConditionReasonError = "Error" + workspaceRBACDriftRequeueInterval = 2 * time.Minute + gatewayExposureRequeueInterval = 2 * time.Minute licenseUploadRequestTimeout = 30 * time.Second entitlementsStatusRefreshInterval = 2 * time.Minute ) @@ -188,8 +202,15 @@ type CoderControlPlaneReconciler struct { // +kubebuilder:rbac:groups=coder.com,resources=codercontrolplanes/status,verbs=get;update;patch // +kubebuilder:rbac:groups=coder.com,resources=codercontrolplanes/finalizers,verbs=update // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=pods;persistentvolumeclaims,verbs=deletecollection +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=deletecollection +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=gateway.networking.k8s.io,resources=httproutes,verbs=get;list;watch;create;update;patch;delete // Reconcile converges the desired CoderControlPlane spec into Deployment and Service resources. 
func (r *CoderControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -213,6 +234,21 @@ func (r *CoderControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Re coderControlPlane.Namespace, coderControlPlane.Name, req.Namespace, req.Name) } + if !coderControlPlane.DeletionTimestamp.IsZero() { + return r.finalizeWorkspaceRBAC(ctx, coderControlPlane) + } + + if err := r.ensureWorkspaceRBACFinalizer(ctx, req.NamespacedName, coderControlPlane); err != nil { + return ctrl.Result{}, err + } + + if err := r.reconcileServiceAccount(ctx, coderControlPlane); err != nil { + return ctrl.Result{}, err + } + if err := r.reconcileWorkspaceRBAC(ctx, coderControlPlane); err != nil { + return ctrl.Result{}, err + } + deployment, err := r.reconcileDeployment(ctx, coderControlPlane) if err != nil { return ctrl.Result{}, err @@ -221,6 +257,10 @@ func (r *CoderControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Re if err != nil { return ctrl.Result{}, err } + gatewayExposureNeedsRequeue, err := r.reconcileExposure(ctx, coderControlPlane) + if err != nil { + return ctrl.Result{}, err + } originalStatus := *coderControlPlane.Status.DeepCopy() nextStatus := r.desiredStatus(coderControlPlane, deployment, service) @@ -244,12 +284,620 @@ func (r *CoderControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, err } - return mergeResults(operatorResult, licenseResult, entitlementsResult), nil + result := mergeResults(operatorResult, licenseResult, entitlementsResult) + if requiresWorkspaceRBACDriftRequeue(coderControlPlane) { + result = mergeResults(result, ctrl.Result{RequeueAfter: workspaceRBACDriftRequeueInterval}) + } + if gatewayExposureNeedsRequeue { + result = mergeResults(result, ctrl.Result{RequeueAfter: gatewayExposureRequeueInterval}) + } + + return result, nil +} + +func resolveServiceAccountName(cp *coderv1alpha1.CoderControlPlane) string { + if cp.Spec.ServiceAccount.Name != "" { + return 
cp.Spec.ServiceAccount.Name
	}
	return cp.Name
}

// workspaceRBACScopeHash derives a short, stable 8-hex-digit hash from the
// control plane's namespace and name. It is embedded in cross-namespace RBAC
// object names so that two control planes sharing a ServiceAccount name cannot
// collide. Returns an error if the control plane, its namespace, or its name
// is missing.
func workspaceRBACScopeHash(coderControlPlane *coderv1alpha1.CoderControlPlane) (string, error) {
	if coderControlPlane == nil {
		return "", fmt.Errorf("assertion failed: coder control plane must not be nil")
	}
	if strings.TrimSpace(coderControlPlane.Namespace) == "" {
		return "", fmt.Errorf("assertion failed: coder control plane namespace must not be empty")
	}
	if strings.TrimSpace(coderControlPlane.Name) == "" {
		return "", fmt.Errorf("assertion failed: coder control plane name must not be empty")
	}

	// FNV-1a over "namespace\x00name"; the NUL separator keeps
	// ("ab","c") and ("a","bc") from hashing identically.
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(coderControlPlane.Namespace))
	_, _ = hasher.Write([]byte{0})
	_, _ = hasher.Write([]byte(coderControlPlane.Name))

	return fmt.Sprintf("%08x", hasher.Sum32()), nil
}

// scopedWorkspaceRBACName builds "<base>-<hash><suffix>", truncating only the
// base (never the hash or suffix) so the result fits within the Kubernetes
// object-name limit. The hash must stay intact because it is what scopes the
// name to one control plane.
func scopedWorkspaceRBACName(baseName, scopeHash, suffix string) (string, error) {
	normalizedBaseName := strings.TrimSpace(baseName)
	if normalizedBaseName == "" {
		return "", fmt.Errorf("assertion failed: workspace RBAC base name must not be empty")
	}
	if strings.TrimSpace(scopeHash) == "" {
		return "", fmt.Errorf("assertion failed: workspace RBAC scope hash must not be empty")
	}

	candidate := fmt.Sprintf("%s-%s%s", normalizedBaseName, scopeHash, suffix)
	if len(candidate) <= kubernetesObjectNameMaxLength {
		return candidate, nil
	}

	// Room left for the prefix once the hash, suffix, and joining '-' are reserved.
	available := kubernetesObjectNameMaxLength - len(scopeHash) - len(suffix) - 1
	if available < 1 {
		return "", fmt.Errorf("assertion failed: workspace RBAC name prefix capacity must be positive")
	}

	truncatedPrefix := normalizedBaseName
	if len(truncatedPrefix) > available {
		truncatedPrefix = truncatedPrefix[:available]
	}
	// Truncation can leave a trailing '-' or '.', invalid at the end of a
	// DNS-style name; fall back to a fixed prefix if trimming empties it.
	truncatedPrefix = strings.Trim(truncatedPrefix, "-.")
	if truncatedPrefix == "" {
		truncatedPrefix = "workspace"
	}

	result := fmt.Sprintf("%s-%s%s", truncatedPrefix, scopeHash, suffix)
	if len(result) > kubernetesObjectNameMaxLength {
		return "", fmt.Errorf("assertion failed: workspace RBAC name %q exceeds %d characters", result, kubernetesObjectNameMaxLength)
	}

	return result, nil
}

// workspaceRoleName returns the managed Role name for serviceAccountName,
// scoped by the control plane hash and carrying the "-workspace-perms" suffix.
func workspaceRoleName(coderControlPlane *coderv1alpha1.CoderControlPlane, serviceAccountName string) (string, error) {
	scopeHash, err := workspaceRBACScopeHash(coderControlPlane)
	if err != nil {
		return "", err
	}

	return scopedWorkspaceRBACName(serviceAccountName, scopeHash, workspaceRoleNameSuffix)
}

// workspaceRoleBindingName returns the managed RoleBinding name; unlike the
// Role it has no suffix, so the two never collide.
func workspaceRoleBindingName(coderControlPlane *coderv1alpha1.CoderControlPlane, serviceAccountName string) (string, error) {
	scopeHash, err := workspaceRBACScopeHash(coderControlPlane)
	if err != nil {
		return "", err
	}

	return scopedWorkspaceRBACName(serviceAccountName, scopeHash, "")
}

// boolOrDefault dereferences explicit, falling back to defaultValue when nil.
func boolOrDefault(explicit *bool, defaultValue bool) bool {
	if explicit == nil {
		return defaultValue
	}

	return *explicit
}

// workspacePermsEnabled reports whether workspace RBAC provisioning is on
// (defaults to true when unset).
func workspacePermsEnabled(explicit *bool) bool {
	return boolOrDefault(explicit, true)
}

// workspaceDeploymentsEnabled reports whether Deployment permissions are
// included in the workspace Role (defaults to true when unset).
func workspaceDeploymentsEnabled(explicit *bool) bool {
	return boolOrDefault(explicit, true)
}

// controlPlaneTLSEnabled reports whether built-in TLS is configured, i.e. at
// least one TLS secret name is present in the spec.
func controlPlaneTLSEnabled(cp *coderv1alpha1.CoderControlPlane) bool {
	if cp == nil {
		return false
	}
	return len(cp.Spec.TLS.SecretNames) > 0
}

// httpRouteBackendServicePort picks the Service port an HTTPRoute backend
// should target: the configured Service port (defaulted), except that with TLS
// enabled and a 443 primary port it falls back to the default HTTP port.
// NOTE(review): the fallback presumably exists because the route backend
// speaks plain HTTP — confirm against the HTTPRoute reconciler.
func httpRouteBackendServicePort(coderControlPlane *coderv1alpha1.CoderControlPlane) (int32, error) {
	if coderControlPlane == nil {
		return 0, fmt.Errorf("assertion failed: coder control plane must not be nil")
	}

	servicePort := coderControlPlane.Spec.Service.Port
	if servicePort == 0 {
		servicePort = defaultControlPlanePort
	}

	if controlPlaneTLSEnabled(coderControlPlane) && servicePort == 443 {
		return defaultControlPlanePort, nil
	}

	return servicePort, nil
}

// requiresWorkspaceRBACDriftRequeue reports whether any workspace RBAC lives
// outside the control plane's own namespace. Such objects cannot carry owner
// references, so a periodic requeue is the only drift-detection mechanism.
func requiresWorkspaceRBACDriftRequeue(cp *coderv1alpha1.CoderControlPlane) bool {
	if cp == nil || !workspacePermsEnabled(cp.Spec.RBAC.WorkspacePerms) {
		return false
	}

	for _, namespace := range cp.Spec.RBAC.WorkspaceNamespaces {
		namespace = strings.TrimSpace(namespace)
		if
namespace == "" || namespace == cp.Namespace {
			continue
		}
		return true
	}

	return false
}

// workspaceRBACLabels returns the label set stamped on every managed workspace
// RBAC object; cleanup later lists by exactly these labels.
func workspaceRBACLabels(cp *coderv1alpha1.CoderControlPlane) map[string]string {
	labels := maps.Clone(controlPlaneLabels(cp.Name))
	labels["coder.com/control-plane"] = cp.Name
	labels["coder.com/control-plane-namespace"] = cp.Namespace
	return labels
}

// workspaceRBACAnnotations records the owning control plane's UID so objects
// in foreign namespaces (which cannot carry owner references) remain
// attributable to one specific control plane instance.
func workspaceRBACAnnotations(ownerUID string) map[string]string {
	return map[string]string{workspaceRBACOwnerUIDAnnotation: ownerUID}
}

// hasWorkspaceRBACIdentityLabels reports whether object carries both identity
// labels pointing at this control plane.
func hasWorkspaceRBACIdentityLabels(object metav1.Object, coderControlPlane *coderv1alpha1.CoderControlPlane) bool {
	if object == nil || coderControlPlane == nil {
		return false
	}

	labels := object.GetLabels()
	if labels == nil {
		return false
	}

	return labels["coder.com/control-plane"] == coderControlPlane.Name &&
		labels["coder.com/control-plane-namespace"] == coderControlPlane.Namespace
}

// hasWorkspaceRBACOwnerUID reports whether object's owner-UID annotation
// matches this control plane's UID. Returns false when the UID is empty
// (e.g. an object built in tests) so a blank never matches a blank.
func hasWorkspaceRBACOwnerUID(object metav1.Object, coderControlPlane *coderv1alpha1.CoderControlPlane) bool {
	if object == nil || coderControlPlane == nil {
		return false
	}

	ownerUID := strings.TrimSpace(string(coderControlPlane.UID))
	if ownerUID == "" {
		return false
	}

	annotations := object.GetAnnotations()
	if annotations == nil {
		return false
	}

	return strings.TrimSpace(annotations[workspaceRBACOwnerUIDAnnotation]) == ownerUID
}

// isManagedWorkspaceRole decides whether role belongs to this control plane
// and may therefore be deleted during cleanup. Ownership is established, in
// order of strength, by: controller owner reference, identity labels plus the
// owner-UID annotation, or (legacy fallback) identity labels on a
// cross-namespace Role bearing the expected name.
func isManagedWorkspaceRole(
	role *rbacv1.Role,
	coderControlPlane *coderv1alpha1.CoderControlPlane,
	expectedRoleName string,
) bool {
	if role == nil || coderControlPlane == nil {
		return false
	}
	if isOwnedByCoderControlPlane(role, coderControlPlane) {
		return true
	}
	if !hasWorkspaceRBACIdentityLabels(role, coderControlPlane) {
		return false
	}
	if hasWorkspaceRBACOwnerUID(role, coderControlPlane) {
		return true
	}

	// Fallback for objects created before the owner-UID annotation existed:
	// only claim cross-namespace Roles with the exact expected name.
	return role.Namespace != coderControlPlane.Namespace && role.Name == expectedRoleName
}

// isManagedWorkspaceRoleBinding mirrors isManagedWorkspaceRole for
// RoleBindings, with a stricter legacy fallback: the binding must reference
// the expected Role and contain exactly one subject — the control plane's
// ServiceAccount in its home namespace.
func isManagedWorkspaceRoleBinding(
	roleBinding *rbacv1.RoleBinding,
	coderControlPlane *coderv1alpha1.CoderControlPlane,
	expectedRoleName string,
	expectedRoleBindingName string,
	expectedServiceAccountName string,
) bool {
	if roleBinding == nil || coderControlPlane == nil {
		return false
	}
	if isOwnedByCoderControlPlane(roleBinding, coderControlPlane) {
		return true
	}
	if !hasWorkspaceRBACIdentityLabels(roleBinding, coderControlPlane) {
		return false
	}
	if hasWorkspaceRBACOwnerUID(roleBinding, coderControlPlane) {
		return true
	}
	if roleBinding.Namespace == coderControlPlane.Namespace {
		return false
	}
	if roleBinding.Name != expectedRoleBindingName {
		return false
	}
	if roleBinding.RoleRef.APIGroup != rbacv1.GroupName || roleBinding.RoleRef.Kind != "Role" || roleBinding.RoleRef.Name != expectedRoleName {
		return false
	}
	if len(roleBinding.Subjects) != 1 {
		return false
	}

	subject := roleBinding.Subjects[0]
	return subject.Kind == rbacv1.ServiceAccountKind &&
		subject.Name == expectedServiceAccountName &&
		subject.Namespace == coderControlPlane.Namespace
}

// ensureWorkspaceRBACFinalizer adds the workspace-RBAC cleanup finalizer if it
// is missing, then reloads the object so the caller continues with the
// post-patch resourceVersion. No-op when the finalizer is already present.
func (r *CoderControlPlaneReconciler) ensureWorkspaceRBACFinalizer(
	ctx context.Context,
	namespacedName types.NamespacedName,
	coderControlPlane *coderv1alpha1.CoderControlPlane,
) error {
	if coderControlPlane == nil {
		return fmt.Errorf("assertion failed: coder control plane must not be nil")
	}
	if coderControlPlane.Name != namespacedName.Name || coderControlPlane.Namespace != namespacedName.Namespace {
		return fmt.Errorf("assertion failed: finalizer target %s/%s does not match request %s", coderControlPlane.Namespace, coderControlPlane.Name, namespacedName)
	}
	if controllerutil.ContainsFinalizer(coderControlPlane, workspaceRBACFinalizer) {
		return nil
	}

	original := coderControlPlane.DeepCopy()
	controllerutil.AddFinalizer(coderControlPlane, workspaceRBACFinalizer)
	if err := r.Patch(ctx, coderControlPlane, client.MergeFrom(original)); err != nil {
		return fmt.Errorf("add workspace RBAC finalizer: %w",
err)
	}
	if err := r.Get(ctx, namespacedName, coderControlPlane); err != nil {
		return fmt.Errorf("reload codercontrolplane %s after finalizer update: %w", namespacedName, err)
	}

	return nil
}

// finalizeWorkspaceRBAC handles deletion: it removes all managed workspace
// RBAC (including cross-namespace objects garbage collection cannot reach),
// then drops the finalizer. A control plane without the finalizer needs no
// cleanup and returns immediately.
func (r *CoderControlPlaneReconciler) finalizeWorkspaceRBAC(
	ctx context.Context,
	coderControlPlane *coderv1alpha1.CoderControlPlane,
) (ctrl.Result, error) {
	if coderControlPlane == nil {
		return ctrl.Result{}, fmt.Errorf("assertion failed: coder control plane must not be nil")
	}
	if !controllerutil.ContainsFinalizer(coderControlPlane, workspaceRBACFinalizer) {
		return ctrl.Result{}, nil
	}

	// nil keep-sets mean "retain nothing": every managed object is deleted.
	if err := r.cleanupManagedWorkspaceRBAC(ctx, coderControlPlane, nil, nil); err != nil {
		return ctrl.Result{}, err
	}

	original := coderControlPlane.DeepCopy()
	controllerutil.RemoveFinalizer(coderControlPlane, workspaceRBACFinalizer)
	if err := r.Patch(ctx, coderControlPlane, client.MergeFrom(original)); err != nil {
		return ctrl.Result{}, fmt.Errorf("remove workspace RBAC finalizer: %w", err)
	}

	return ctrl.Result{}, nil
}

// reconcileServiceAccount creates or updates the control plane pod's
// ServiceAccount. When creation is disabled in the spec it instead detaches
// any previously managed ServiceAccounts so they are not garbage-collected
// with the control plane.
func (r *CoderControlPlaneReconciler) reconcileServiceAccount(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) error {
	if coderControlPlane == nil {
		return fmt.Errorf("assertion failed: coder control plane must not be nil")
	}
	if coderControlPlane.Spec.ServiceAccount.DisableCreate {
		return r.detachManagedServiceAccounts(ctx, coderControlPlane)
	}

	serviceAccountName := resolveServiceAccountName(coderControlPlane)
	if strings.TrimSpace(serviceAccountName) == "" {
		return fmt.Errorf("assertion failed: service account name must not be empty")
	}

	serviceAccount := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: serviceAccountName, Namespace: coderControlPlane.Namespace}}
	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, serviceAccount, func() error {
		// Spec labels are layered over the standard control plane labels, so
		// user labels win on key conflicts.
		labels := maps.Clone(controlPlaneLabels(coderControlPlane.Name))
		maps.Copy(labels, coderControlPlane.Spec.ServiceAccount.Labels)
		serviceAccount.Labels = labels
		serviceAccount.Annotations = maps.Clone(coderControlPlane.Spec.ServiceAccount.Annotations)

		if err := controllerutil.SetControllerReference(coderControlPlane, serviceAccount, r.Scheme); err != nil {
			return fmt.Errorf("set controller reference: %w", err)
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("reconcile control plane serviceaccount: %w", err)
	}

	return nil
}

// detachManagedServiceAccounts removes this control plane's controller
// reference from any ServiceAccount it owns in its namespace, leaving the
// objects in place (used when ServiceAccount creation is disabled after the
// fact).
func (r *CoderControlPlaneReconciler) detachManagedServiceAccounts(
	ctx context.Context,
	coderControlPlane *coderv1alpha1.CoderControlPlane,
) error {
	if coderControlPlane == nil {
		return fmt.Errorf("assertion failed: coder control plane must not be nil")
	}

	serviceAccounts := &corev1.ServiceAccountList{}
	if err := r.List(
		ctx,
		serviceAccounts,
		client.InNamespace(coderControlPlane.Namespace),
	); err != nil {
		return fmt.Errorf("list service accounts for detachment: %w", err)
	}

	for i := range serviceAccounts.Items {
		serviceAccount := &serviceAccounts.Items[i]
		if !isOwnedByCoderControlPlane(serviceAccount, coderControlPlane) {
			continue
		}

		original := serviceAccount.DeepCopy()
		if err := controllerutil.RemoveControllerReference(coderControlPlane, serviceAccount, r.Scheme); err != nil {
			return fmt.Errorf("remove controller reference from service account %s/%s: %w", serviceAccount.Namespace, serviceAccount.Name, err)
		}
		// Skip the patch when removal was a no-op to avoid empty API writes.
		if equality.Semantic.DeepEqual(original.OwnerReferences, serviceAccount.OwnerReferences) {
			continue
		}

		if err := r.Patch(ctx, serviceAccount, client.MergeFrom(original)); err != nil {
			return fmt.Errorf("patch detached service account %s/%s: %w", serviceAccount.Namespace, serviceAccount.Name, err)
		}
	}

	return nil
}

// reconcileWorkspaceRBAC converges Roles/RoleBindings granting the control
// plane's ServiceAccount workspace-provisioning permissions in its own
// namespace plus every configured workspace namespace, then prunes managed
// objects that are no longer desired.
func (r *CoderControlPlaneReconciler) reconcileWorkspaceRBAC(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) error {
	if coderControlPlane == nil {
		return fmt.Errorf("assertion
failed: coder control plane must not be nil")
	}

	serviceAccountName := resolveServiceAccountName(coderControlPlane)
	if strings.TrimSpace(serviceAccountName) == "" {
		return fmt.Errorf("assertion failed: service account name must not be empty")
	}
	ownerUID := strings.TrimSpace(string(coderControlPlane.UID))
	if ownerUID == "" {
		return fmt.Errorf("assertion failed: coder control plane UID must not be empty")
	}
	roleName, err := workspaceRoleName(coderControlPlane, serviceAccountName)
	if err != nil {
		return err
	}
	roleBindingName, err := workspaceRoleBindingName(coderControlPlane, serviceAccountName)
	if err != nil {
		return err
	}

	// Disabled => delete everything managed (nil keep-sets retain nothing).
	if !workspacePermsEnabled(coderControlPlane.Spec.RBAC.WorkspacePerms) {
		return r.cleanupManagedWorkspaceRBAC(ctx, coderControlPlane, nil, nil)
	}

	// Baseline permissions for workspace pods and their storage; Deployments
	// are added conditionally, then user-supplied extra rules are appended.
	rules := []rbacv1.PolicyRule{
		{
			APIGroups: []string{""},
			Resources: []string{"pods"},
			Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"},
		},
		{
			APIGroups: []string{""},
			Resources: []string{"persistentvolumeclaims"},
			Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"},
		},
	}
	if workspaceDeploymentsEnabled(coderControlPlane.Spec.RBAC.EnableDeployments) {
		rules = append(rules, rbacv1.PolicyRule{
			APIGroups: []string{"apps"},
			Resources: []string{"deployments"},
			Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"},
		})
	}
	rules = append(rules, coderControlPlane.Spec.RBAC.ExtraRules...)

	// The home namespace is always covered, ahead of any extra namespaces.
	targetNamespaces := append([]string{coderControlPlane.Namespace}, coderControlPlane.Spec.RBAC.WorkspaceNamespaces...)
	seenNamespaces := make(map[string]struct{}, len(targetNamespaces))
	keepRoles := make(map[string]struct{}, len(targetNamespaces))
	keepRoleBindings := make(map[string]struct{}, len(targetNamespaces))
	for _, namespace := range targetNamespaces {
		namespace = strings.TrimSpace(namespace)
		if namespace == "" {
			// NOTE(review): requiresWorkspaceRBACDriftRequeue silently skips
			// empty entries while this errors out — confirm which behavior the
			// CRD validation guarantees.
			return fmt.Errorf("assertion failed: workspace namespace must not be empty")
		}
		if _, seen := seenNamespaces[namespace]; seen {
			continue
		}
		seenNamespaces[namespace] = struct{}{}

		labels := workspaceRBACLabels(coderControlPlane)
		annotations := workspaceRBACAnnotations(ownerUID)

		role := &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: roleName, Namespace: namespace}}
		_, err := controllerutil.CreateOrUpdate(ctx, r.Client, role, func() error {
			role.Labels = maps.Clone(labels)
			role.Annotations = maps.Clone(annotations)
			role.Rules = append([]rbacv1.PolicyRule(nil), rules...)

			// Owner references are namespace-local: only objects in the home
			// namespace can be garbage-collected via ownership. Foreign-namespace
			// objects rely on labels + UID annotation and the finalizer instead.
			if namespace == coderControlPlane.Namespace {
				if err := controllerutil.SetControllerReference(coderControlPlane, role, r.Scheme); err != nil {
					return fmt.Errorf("set controller reference: %w", err)
				}
			} else {
				role.OwnerReferences = nil
			}

			return nil
		})
		if err != nil {
			return fmt.Errorf("reconcile workspace role %s/%s: %w", namespace, roleName, err)
		}

		roleBinding := &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: roleBindingName, Namespace: namespace}}
		_, err = controllerutil.CreateOrUpdate(ctx, r.Client, roleBinding, func() error {
			roleBinding.Labels = maps.Clone(labels)
			roleBinding.Annotations = maps.Clone(annotations)
			roleBinding.RoleRef = rbacv1.RoleRef{
				APIGroup: rbacv1.GroupName,
				Kind:     "Role",
				Name:     roleName,
			}
			// Always bind the ServiceAccount from the control plane's home
			// namespace, even in foreign workspace namespaces.
			roleBinding.Subjects = []rbacv1.Subject{{
				Kind:      rbacv1.ServiceAccountKind,
				Name:      serviceAccountName,
				Namespace: coderControlPlane.Namespace,
			}}

			if namespace == coderControlPlane.Namespace {
				if err := controllerutil.SetControllerReference(coderControlPlane, roleBinding, r.Scheme); err != nil {
					return fmt.Errorf("set controller reference: %w", err)
				}
			} else {
				roleBinding.OwnerReferences = nil
			}

			return nil
		})
		if err != nil {
			return fmt.Errorf("reconcile workspace role binding %s/%s: %w", namespace, roleBindingName, err)
		}

		keepRoles[namespacedResourceKey(namespace, roleName)] = struct{}{}
		keepRoleBindings[namespacedResourceKey(namespace, roleBindingName)] = struct{}{}
	}

	// Prune managed objects in namespaces no longer listed.
	return r.cleanupManagedWorkspaceRBAC(ctx, coderControlPlane, keepRoles, keepRoleBindings)
}

// namespacedResourceKey builds the "namespace/name" key used by the keep-sets.
func namespacedResourceKey(namespace, name string) string {
	return fmt.Sprintf("%s/%s", namespace, name)
}

// cleanupManagedWorkspaceRBAC deletes managed workspace Roles/RoleBindings not
// present in the keep-sets. A nil keep-set retains nothing. Objects are
// discovered by label across all namespaces and only deleted when the
// isManagedWorkspace* checks attribute them to this control plane.
func (r *CoderControlPlaneReconciler) cleanupManagedWorkspaceRBAC(
	ctx context.Context,
	coderControlPlane *coderv1alpha1.CoderControlPlane,
	keepRoles map[string]struct{},
	keepRoleBindings map[string]struct{},
) error {
	if coderControlPlane == nil {
		return fmt.Errorf("assertion failed: coder control plane must not be nil")
	}

	serviceAccountName := strings.TrimSpace(resolveServiceAccountName(coderControlPlane))
	if serviceAccountName == "" {
		serviceAccountName = coderControlPlane.Name
	}
	expectedRoleName, err := workspaceRoleName(coderControlPlane, serviceAccountName)
	if err != nil {
		return err
	}
	expectedRoleBindingName, err := workspaceRoleBindingName(coderControlPlane, serviceAccountName)
	if err != nil {
		return err
	}

	labels := workspaceRBACLabels(coderControlPlane)

	roles := &rbacv1.RoleList{}
	if err := r.List(ctx, roles, client.MatchingLabels(labels)); err != nil {
		return fmt.Errorf("list managed workspace roles: %w", err)
	}
	for i := range roles.Items {
		role := &roles.Items[i]
		if keepRoles != nil {
			if _, ok := keepRoles[namespacedResourceKey(role.Namespace, role.Name)]; ok {
				continue
			}
		}
		if !isManagedWorkspaceRole(role, coderControlPlane, expectedRoleName) {
			continue
		}
		// NotFound is fine: another reconcile or GC already removed it.
		if err := r.Delete(ctx, role); err != nil && !apierrors.IsNotFound(err) {
			return fmt.Errorf("delete managed workspace role %s/%s: %w", role.Namespace, role.Name, err)
		}
	}

	roleBindings := &rbacv1.RoleBindingList{}
	if err := r.List(ctx, roleBindings, client.MatchingLabels(labels)); err != nil {
		return fmt.Errorf("list managed workspace role bindings: %w", err)
	}
	for i := range roleBindings.Items {
		roleBinding := &roleBindings.Items[i]
		if keepRoleBindings != nil {
			if _, ok := keepRoleBindings[namespacedResourceKey(roleBinding.Namespace, roleBinding.Name)]; ok {
				continue
			}
		}
		if !isManagedWorkspaceRoleBinding(roleBinding, coderControlPlane, expectedRoleName, expectedRoleBindingName, serviceAccountName) {
			continue
		}
		if err := r.Delete(ctx, roleBinding); err != nil && !apierrors.IsNotFound(err) {
			return fmt.Errorf("delete managed workspace role binding %s/%s: %w", roleBinding.Namespace, roleBinding.Name, err)
		}
	}

	return nil
}

// probeEnabled resolves the tri-state Enabled flag against the probe's default.
func probeEnabled(explicit *bool, defaultEnabled bool) bool {
	return boolOrDefault(explicit, defaultEnabled)
}

// buildProbe constructs an HTTP GET probe on the named container port, using
// Kubernetes-default thresholds (period 10s, timeout 1s, success 1, failure 3)
// unless overridden in spec.
func buildProbe(spec coderv1alpha1.ProbeSpec, path, portName string) *corev1.Probe {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   path,
				Port:   intstr.FromString(portName),
				Scheme: corev1.URISchemeHTTP,
			},
		},
		InitialDelaySeconds: spec.InitialDelaySeconds,
		PeriodSeconds:       10,
		TimeoutSeconds:      1,
		SuccessThreshold:    1,
		FailureThreshold:    3,
	}
	if spec.PeriodSeconds != nil {
		probe.PeriodSeconds = *spec.PeriodSeconds
	}
	if spec.TimeoutSeconds != nil {
		probe.TimeoutSeconds = *spec.TimeoutSeconds
	}
	if spec.SuccessThreshold != nil {
		probe.SuccessThreshold = *spec.SuccessThreshold
	}
	if spec.FailureThreshold != nil {
		probe.FailureThreshold = *spec.FailureThreshold
	}

	return probe
}

func (r *CoderControlPlaneReconciler) reconcileDeployment(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) (*appsv1.Deployment, error) {
	if coderControlPlane == nil {
return nil, fmt.Errorf("assertion failed: coder control plane must not be nil") + } + deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace}} + injectClusterAccessURL := coderControlPlane.Spec.EnvUseClusterAccessURL == nil || *coderControlPlane.Spec.EnvUseClusterAccessURL + accessURLConfiguredViaEnvFrom := false + if injectClusterAccessURL { + var err error + accessURLConfiguredViaEnvFrom, err = r.envFromDefinesEnvVar(ctx, coderControlPlane.Namespace, coderControlPlane.Spec.EnvFrom, "CODER_ACCESS_URL") + if err != nil { + return nil, err + } + } + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, deployment, func() error { labels := controlPlaneLabels(coderControlPlane.Name) deployment.Labels = maps.Clone(labels) @@ -268,78 +916,640 @@ func (r *CoderControlPlaneReconciler) reconcileDeployment(ctx context.Context, c image = defaultCoderImage } - args := []string{"--http-address=0.0.0.0:3000"} + serviceAccountName := resolveServiceAccountName(coderControlPlane) + if strings.TrimSpace(serviceAccountName) == "" { + return fmt.Errorf("assertion failed: service account name must not be empty") + } + + args := []string{"--http-address=0.0.0.0:8080"} args = append(args, coderControlPlane.Spec.ExtraArgs...) 
+ env := []corev1.EnvVar{ + { + Name: "KUBE_POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }, + { + Name: "CODER_DERP_SERVER_RELAY_URL", + Value: "http://$(KUBE_POD_IP):8080", + }, + } + + tlsEnabled := controlPlaneTLSEnabled(coderControlPlane) + if injectClusterAccessURL { + configuredAccessURL, err := findEnvVar(coderControlPlane.Spec.ExtraEnv, "CODER_ACCESS_URL") + if err != nil { + return err + } + if configuredAccessURL == nil && !accessURLConfiguredViaEnvFrom { + scheme := "http" + accessURLPort := coderControlPlane.Spec.Service.Port + if accessURLPort == 0 { + accessURLPort = defaultControlPlanePort + } + if tlsEnabled { + scheme = "https" + accessURLPort = 443 + } + + accessURL := fmt.Sprintf("%s://%s.%s.svc.cluster.local", scheme, coderControlPlane.Name, coderControlPlane.Namespace) + if (scheme == "http" && accessURLPort != 80) || (scheme == "https" && accessURLPort != 443) { + accessURL = fmt.Sprintf("%s:%d", accessURL, accessURLPort) + } + env = append(env, corev1.EnvVar{ + Name: "CODER_ACCESS_URL", + Value: accessURL, + }) + } + } + + ports := []corev1.ContainerPort{{ + Name: "http", + ContainerPort: controlPlaneTargetPort, + Protocol: corev1.ProtocolTCP, + }} + + volumes := make([]corev1.Volume, 0, len(coderControlPlane.Spec.TLS.SecretNames)+len(coderControlPlane.Spec.Certs.Secrets)+len(coderControlPlane.Spec.Volumes)) + volumeMounts := make([]corev1.VolumeMount, 0, len(coderControlPlane.Spec.TLS.SecretNames)+len(coderControlPlane.Spec.Certs.Secrets)+len(coderControlPlane.Spec.VolumeMounts)) + if tlsEnabled { + tlsCertFiles := make([]string, 0, len(coderControlPlane.Spec.TLS.SecretNames)) + tlsKeyFiles := make([]string, 0, len(coderControlPlane.Spec.TLS.SecretNames)) + + tlsSecretSeen := make(map[string]struct{}, len(coderControlPlane.Spec.TLS.SecretNames)) + + for _, secretName := range coderControlPlane.Spec.TLS.SecretNames { + secretName = strings.TrimSpace(secretName) + if 
secretName == "" { + return fmt.Errorf("assertion failed: tls secret name must not be empty") + } + if _, seen := tlsSecretSeen[secretName]; seen { + continue + } + tlsSecretSeen[secretName] = struct{}{} + + volumeName := volumeNameForSecret("tls", secretName) + mountPath := fmt.Sprintf("/etc/ssl/certs/coder/%s", secretName) + volumes = append(volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: secretName}, + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: mountPath, + ReadOnly: true, + }) + + tlsCertFiles = append(tlsCertFiles, fmt.Sprintf("%s/tls.crt", mountPath)) + tlsKeyFiles = append(tlsKeyFiles, fmt.Sprintf("%s/tls.key", mountPath)) + } + + env = append(env, + corev1.EnvVar{Name: "CODER_TLS_ENABLE", Value: "true"}, + corev1.EnvVar{Name: "CODER_TLS_ADDRESS", Value: "0.0.0.0:8443"}, + corev1.EnvVar{Name: "CODER_TLS_CERT_FILE", Value: strings.Join(tlsCertFiles, ",")}, + corev1.EnvVar{Name: "CODER_TLS_KEY_FILE", Value: strings.Join(tlsKeyFiles, ",")}, + ) + + ports = append(ports, corev1.ContainerPort{ + Name: "https", + ContainerPort: controlPlaneTLSTargetPort, + Protocol: corev1.ProtocolTCP, + }) + } + + certSecretNameCounts := make(map[string]int, len(coderControlPlane.Spec.Certs.Secrets)) + for i := range coderControlPlane.Spec.Certs.Secrets { + secretName := strings.TrimSpace(coderControlPlane.Spec.Certs.Secrets[i].Name) + if secretName == "" { + continue + } + certSecretNameCounts[secretName]++ + } + + certVolumeNameBySecret := make(map[string]string, len(certSecretNameCounts)) + certMountFileCount := make(map[string]int, len(coderControlPlane.Spec.Certs.Secrets)) + certSelectorSeen := make(map[string]struct{}, len(coderControlPlane.Spec.Certs.Secrets)) + for i := range coderControlPlane.Spec.Certs.Secrets { + secret := coderControlPlane.Spec.Certs.Secrets[i] + secret.Name = strings.TrimSpace(secret.Name) + secret.Key = 
strings.TrimSpace(secret.Key) + if secret.Name == "" { + return fmt.Errorf("assertion failed: cert secret name must not be empty") + } + if secret.Key == "" { + return fmt.Errorf("assertion failed: cert secret key must not be empty") + } + + selectorKey := fmt.Sprintf("%s\x00%s", secret.Name, secret.Key) + if _, seen := certSelectorSeen[selectorKey]; seen { + continue + } + certSelectorSeen[selectorKey] = struct{}{} + + volumeName, volumeExists := certVolumeNameBySecret[secret.Name] + if !volumeExists { + volumeName = volumeNameForSecret("ca-cert", secret.Name) + certVolumeNameBySecret[secret.Name] = volumeName + volumes = append(volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: secret.Name}, + }, + }) + } + + mountFileBase := secret.Name + if certSecretNameCounts[secret.Name] > 1 { + mountFileBase = fmt.Sprintf("%s-%s", secret.Name, secret.Key) + } + mountFileName := mountFileBase + if !strings.HasSuffix(mountFileName, ".crt") { + mountFileName += ".crt" + } + + mountFileCount := certMountFileCount[mountFileName] + certMountFileCount[mountFileName] = mountFileCount + 1 + if mountFileCount > 0 { + mountFileName = strings.TrimSuffix(mountFileName, ".crt") + mountFileName = fmt.Sprintf("%s-%d.crt", mountFileName, mountFileCount+1) + } + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/ssl/certs/%s", mountFileName), + SubPath: secret.Key, + ReadOnly: true, + }) + } + + env = append(env, coderControlPlane.Spec.ExtraEnv...) + volumes = append(volumes, coderControlPlane.Spec.Volumes...) + volumeMounts = append(volumeMounts, coderControlPlane.Spec.VolumeMounts...) 
+ + container := corev1.Container{ + Name: "coder", + Image: image, + Args: args, + Env: env, + EnvFrom: coderControlPlane.Spec.EnvFrom, + Ports: ports, + VolumeMounts: volumeMounts, + } + if coderControlPlane.Spec.SecurityContext != nil { + container.SecurityContext = coderControlPlane.Spec.SecurityContext + } + if coderControlPlane.Spec.Resources != nil { + container.Resources = *coderControlPlane.Spec.Resources + } + if probeEnabled(coderControlPlane.Spec.ReadinessProbe.Enabled, true) { + container.ReadinessProbe = buildProbe(coderControlPlane.Spec.ReadinessProbe, "/healthz", "http") + } + if probeEnabled(coderControlPlane.Spec.LivenessProbe.Enabled, false) { + container.LivenessProbe = buildProbe(coderControlPlane.Spec.LivenessProbe, "/healthz", "http") + } + + podSpec := corev1.PodSpec{ + ServiceAccountName: serviceAccountName, + ImagePullSecrets: coderControlPlane.Spec.ImagePullSecrets, + Containers: []corev1.Container{container}, + Volumes: volumes, + NodeSelector: maps.Clone(coderControlPlane.Spec.NodeSelector), + Tolerations: append([]corev1.Toleration(nil), coderControlPlane.Spec.Tolerations...), + TopologySpreadConstraints: append( + []corev1.TopologySpreadConstraint(nil), + coderControlPlane.Spec.TopologySpreadConstraints..., + ), + } + if coderControlPlane.Spec.PodSecurityContext != nil { + podSpec.SecurityContext = coderControlPlane.Spec.PodSecurityContext + } + if coderControlPlane.Spec.Affinity != nil { + podSpec.Affinity = coderControlPlane.Spec.Affinity + } + deployment.Spec.Replicas = &replicas deployment.Spec.Selector = &metav1.LabelSelector{MatchLabels: maps.Clone(labels)} deployment.Spec.Template = corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: maps.Clone(labels)}, - Spec: corev1.PodSpec{ - ImagePullSecrets: coderControlPlane.Spec.ImagePullSecrets, - Containers: []corev1.Container{{ - Name: "coder", - Image: image, - Args: args, - Env: coderControlPlane.Spec.ExtraEnv, - Ports: []corev1.ContainerPort{{ - Name: "http", - 
			Spec: podSpec,
		}

		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("reconcile control plane deployment: %w", err)
	}

	// Avoid an immediate cached read-after-write here; cache propagation lag can
	// transiently return NotFound for just-created objects and produce noisy reconcile errors.
	return deployment, nil
}

// reconcileService converges the control plane Service. Port layout: the
// primary port defaults to 80; with TLS enabled the Service exposes both an
// "http" and an "https" port, with the primary named "https" when the
// configured port is 443.
func (r *CoderControlPlaneReconciler) reconcileService(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) (*corev1.Service, error) {
	service := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace}}

	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, service, func() error {
		labels := controlPlaneLabels(coderControlPlane.Name)
		service.Labels = maps.Clone(labels)
		service.Annotations = maps.Clone(coderControlPlane.Spec.Service.Annotations)

		if err := controllerutil.SetControllerReference(coderControlPlane, service, r.Scheme); err != nil {
			return fmt.Errorf("set controller reference: %w", err)
		}

		serviceType := coderControlPlane.Spec.Service.Type
		if serviceType == "" {
			serviceType = corev1.ServiceTypeClusterIP
		}
		servicePort := coderControlPlane.Spec.Service.Port
		if servicePort == 0 {
			servicePort = defaultControlPlanePort
		}

		tlsEnabled := controlPlaneTLSEnabled(coderControlPlane)
		primaryServicePort := corev1.ServicePort{
			Name:       "http",
			Port:       servicePort,
			Protocol:   corev1.ProtocolTCP,
			TargetPort: intstr.FromInt(int(controlPlaneTargetPort)),
		}
		// A TLS-enabled Service on 443 routes its primary port to the TLS
		// container port instead of plain HTTP.
		if tlsEnabled && servicePort == 443 {
			primaryServicePort.Name = "https"
			primaryServicePort.TargetPort = intstr.FromInt(int(controlPlaneTLSTargetPort))
		}

		servicePorts := []corev1.ServicePort{primaryServicePort}
		// With TLS, always expose both protocols: add HTTP when the primary is
		// 443, or HTTPS on 443 when it is not.
		if tlsEnabled && servicePort == 443 {
			servicePorts = append(servicePorts, corev1.ServicePort{
				Name:       "http",
				Port:       defaultControlPlanePort,
				Protocol:   corev1.ProtocolTCP,
				TargetPort: intstr.FromInt(int(controlPlaneTargetPort)),
			})
		}
		if tlsEnabled && servicePort != 443 {
			servicePorts = append(servicePorts, corev1.ServicePort{
				Name:       "https",
				Port:       443,
				Protocol:   corev1.ProtocolTCP,
				TargetPort: intstr.FromInt(int(controlPlaneTLSTargetPort)),
			})
		}

		service.Spec.Type = serviceType
		service.Spec.Selector = maps.Clone(labels)
		service.Spec.Ports = servicePorts
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("reconcile control plane service: %w", err)
	}

	// Avoid an immediate cached read-after-write here; cache propagation lag can
	// transiently return NotFound for just-created objects and produce noisy reconcile errors.
	return service, nil
}

// reconcileExposure converges external exposure (Ingress XOR Gateway API
// HTTPRoute). The bool return signals the caller to requeue for Gateway
// status polling.
func (r *CoderControlPlaneReconciler) reconcileExposure(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) (bool, error) {
	if coderControlPlane == nil {
		return false, fmt.Errorf("assertion failed: coder control plane must not be nil")
	}

	exposeSpec := coderControlPlane.Spec.Expose
	// No exposure requested: remove any previously managed objects of both kinds.
	if exposeSpec == nil || (exposeSpec.Ingress == nil && exposeSpec.Gateway == nil) {
		if err := r.cleanupOwnedIngress(ctx, coderControlPlane); err != nil {
			return false, fmt.Errorf("cleanup managed ingress: %w", err)
		}
		if err := r.cleanupOwnedHTTPRoute(ctx, coderControlPlane); err != nil {
			return false, fmt.Errorf("cleanup managed httproute: %w", err)
		}
		return false, nil
	}

	if exposeSpec.Ingress != nil && exposeSpec.Gateway != nil {
		return false, fmt.Errorf("assertion failed: only one of ingress or gateway exposure may be configured")
	}

	if exposeSpec.Ingress != nil {
		if err := r.reconcileIngress(ctx, coderControlPlane); err != nil {
			return false, err
		}
		// Switching from Gateway to Ingress must drop the stale HTTPRoute.
		if err := r.cleanupOwnedHTTPRoute(ctx, coderControlPlane); err != nil {
			return false, fmt.Errorf("cleanup managed httproute: %w", err)
		}
		return false, nil
	}

	httpRouteReconciled, err := r.reconcileHTTPRoute(ctx, coderControlPlane)
+ if err != nil { + return false, err + } + if err := r.cleanupOwnedIngress(ctx, coderControlPlane); err != nil { + return false, fmt.Errorf("cleanup managed ingress: %w", err) + } + + return httpRouteReconciled, nil +} + +func (r *CoderControlPlaneReconciler) reconcileIngress(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) error { + if coderControlPlane == nil { + return fmt.Errorf("assertion failed: coder control plane must not be nil") + } + if coderControlPlane.Spec.Expose == nil || coderControlPlane.Spec.Expose.Ingress == nil { + return fmt.Errorf("assertion failed: ingress exposure spec must not be nil") + } + + ingressExpose := coderControlPlane.Spec.Expose.Ingress + primaryHost := strings.TrimSpace(ingressExpose.Host) + if primaryHost == "" { + return fmt.Errorf("assertion failed: ingress host must not be empty") + } + + wildcardHost := strings.TrimSpace(ingressExpose.WildcardHost) + backendServicePort, backendPortErr := httpRouteBackendServicePort(coderControlPlane) + if backendPortErr != nil { + return backendPortErr + } + + ingress := &networkingv1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace}} + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, ingress, func() error { + labels := controlPlaneLabels(coderControlPlane.Name) + ingress.Labels = maps.Clone(labels) + ingress.Annotations = maps.Clone(ingressExpose.Annotations) + + pathTypePrefix := networkingv1.PathTypePrefix + rules := []networkingv1.IngressRule{ + { + Host: primaryHost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: &pathTypePrefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: coderControlPlane.Name, + Port: networkingv1.ServiceBackendPort{Number: backendServicePort}, + }, + }, + }, + }, + }, + }, + }, + } + if wildcardHost != "" { + 
rules = append(rules, networkingv1.IngressRule{ + Host: wildcardHost, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: &pathTypePrefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: coderControlPlane.Name, + Port: networkingv1.ServiceBackendPort{Number: backendServicePort}, + }, + }, + }, + }, + }, + }, + }) + } + + var tls []networkingv1.IngressTLS + if ingressExpose.TLS != nil { + secretName := strings.TrimSpace(ingressExpose.TLS.SecretName) + if secretName != "" { + tls = append(tls, networkingv1.IngressTLS{ + SecretName: secretName, + Hosts: []string{primaryHost}, + }) + } + + wildcardSecretName := strings.TrimSpace(ingressExpose.TLS.WildcardSecretName) + if wildcardSecretName != "" { + if wildcardHost == "" { + return fmt.Errorf("assertion failed: ingress wildcard host must not be empty when wildcard TLS secret is set") + } + tls = append(tls, networkingv1.IngressTLS{ + SecretName: wildcardSecretName, + Hosts: []string{wildcardHost}, + }) + } + } + + ingress.Spec = networkingv1.IngressSpec{ + IngressClassName: ingressExpose.ClassName, + Rules: rules, + TLS: tls, + } + + if err := controllerutil.SetControllerReference(coderControlPlane, ingress, r.Scheme); err != nil { + return fmt.Errorf("set controller reference: %w", err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("reconcile control plane ingress: %w", err) + } + + return nil +} + +func (r *CoderControlPlaneReconciler) reconcileHTTPRoute(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) (bool, error) { + if coderControlPlane == nil { + return false, fmt.Errorf("assertion failed: coder control plane must not be nil") + } + if coderControlPlane.Spec.Expose == nil || coderControlPlane.Spec.Expose.Gateway == nil { + return false, fmt.Errorf("assertion failed: gateway exposure spec must not be nil") + } + + 
gatewayExpose := coderControlPlane.Spec.Expose.Gateway + primaryHost := strings.TrimSpace(gatewayExpose.Host) + if primaryHost == "" { + return false, fmt.Errorf("assertion failed: gateway host must not be empty") + } + + if len(gatewayExpose.ParentRefs) == 0 { + return false, fmt.Errorf("assertion failed: gateway parentRefs must not be empty") + } + + httpRoute := &gatewayv1.HTTPRoute{ObjectMeta: metav1.ObjectMeta{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace}} + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, httpRoute, func() error { + labels := controlPlaneLabels(coderControlPlane.Name) + httpRoute.Labels = maps.Clone(labels) + + parentRefs := make([]gatewayv1.ParentReference, 0, len(gatewayExpose.ParentRefs)) + for i := range gatewayExpose.ParentRefs { + parentRefSpec := gatewayExpose.ParentRefs[i] + parentRefName := strings.TrimSpace(parentRefSpec.Name) + if parentRefName == "" { + return fmt.Errorf("assertion failed: gateway parentRef[%d] name must not be empty", i) + } + + parentRef := gatewayv1.ParentReference{Name: gatewayv1.ObjectName(parentRefName)} + if parentRefSpec.Namespace != nil { + namespace := strings.TrimSpace(*parentRefSpec.Namespace) + if namespace == "" { + return fmt.Errorf("assertion failed: gateway parentRef[%d] namespace must not be empty when set", i) + } + namespaceRef := gatewayv1.Namespace(namespace) + parentRef.Namespace = &namespaceRef + } + if parentRefSpec.SectionName != nil { + sectionName := strings.TrimSpace(*parentRefSpec.SectionName) + if sectionName == "" { + return fmt.Errorf("assertion failed: gateway parentRef[%d] sectionName must not be empty when set", i) + } + sectionNameRef := gatewayv1.SectionName(sectionName) + parentRef.SectionName = &sectionNameRef + } + + parentRefs = append(parentRefs, parentRef) + } + + hostnames := []gatewayv1.Hostname{gatewayv1.Hostname(primaryHost)} + wildcardHost := strings.TrimSpace(gatewayExpose.WildcardHost) + if wildcardHost != "" { + hostnames = 
append(hostnames, gatewayv1.Hostname(wildcardHost)) + } + + servicePort, err := httpRouteBackendServicePort(coderControlPlane) + if err != nil { + return err + } + backendPort := gatewayv1.PortNumber(servicePort) + serviceKind := gatewayv1.Kind("Service") + serviceGroup := gatewayv1.Group("") + pathTypePrefix := gatewayv1.PathMatchPathPrefix + pathPrefix := "/" + + httpRoute.Spec = gatewayv1.HTTPRouteSpec{ + CommonRouteSpec: gatewayv1.CommonRouteSpec{ParentRefs: parentRefs}, + Hostnames: hostnames, + Rules: []gatewayv1.HTTPRouteRule{ + { + Matches: []gatewayv1.HTTPRouteMatch{ + { + Path: &gatewayv1.HTTPPathMatch{ + Type: &pathTypePrefix, + Value: &pathPrefix, + }, + }, + }, + BackendRefs: []gatewayv1.HTTPBackendRef{ + { + BackendRef: gatewayv1.BackendRef{ + BackendObjectReference: gatewayv1.BackendObjectReference{ + Group: &serviceGroup, + Kind: &serviceKind, + Name: gatewayv1.ObjectName(coderControlPlane.Name), + Port: &backendPort, + }, + }, + }, + }, + }, }, } + if err := controllerutil.SetControllerReference(coderControlPlane, httpRoute, r.Scheme); err != nil { + return fmt.Errorf("set controller reference: %w", err) + } + return nil }) if err != nil { - return nil, fmt.Errorf("reconcile control plane deployment: %w", err) + if meta.IsNoMatchError(err) { + ctrl.LoggerFrom(ctx).WithName("controller").WithName("codercontrolplane").Info( + "Gateway API CRDs not available, retrying HTTPRoute reconciliation", + ) + return true, nil + } + return false, fmt.Errorf("reconcile control plane httproute: %w", err) } - // Avoid an immediate cached read-after-write here; cache propagation lag can - // transiently return NotFound for just-created objects and produce noisy reconcile errors. 
- return deployment, nil + return true, nil } -func (r *CoderControlPlaneReconciler) reconcileService(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) (*corev1.Service, error) { - service := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace}} +func (r *CoderControlPlaneReconciler) cleanupOwnedIngress(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) error { + if coderControlPlane == nil { + return fmt.Errorf("assertion failed: coder control plane must not be nil") + } - _, err := controllerutil.CreateOrUpdate(ctx, r.Client, service, func() error { - labels := controlPlaneLabels(coderControlPlane.Name) - service.Labels = maps.Clone(labels) - service.Annotations = maps.Clone(coderControlPlane.Spec.Service.Annotations) + ingress := &networkingv1.Ingress{} + namespacedName := types.NamespacedName{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace} + err := r.Get(ctx, namespacedName, ingress) + switch { + case err == nil: + case apierrors.IsNotFound(err): + return nil + default: + return fmt.Errorf("get control plane ingress %s: %w", namespacedName, err) + } - if err := controllerutil.SetControllerReference(coderControlPlane, service, r.Scheme); err != nil { - return fmt.Errorf("set controller reference: %w", err) - } + if !isOwnedByCoderControlPlane(ingress, coderControlPlane) { + return nil + } - serviceType := coderControlPlane.Spec.Service.Type - if serviceType == "" { - serviceType = corev1.ServiceTypeClusterIP - } - servicePort := coderControlPlane.Spec.Service.Port - if servicePort == 0 { - servicePort = defaultControlPlanePort - } + if err := r.Delete(ctx, ingress); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("delete control plane ingress %s: %w", namespacedName, err) + } - service.Spec.Type = serviceType - service.Spec.Selector = maps.Clone(labels) - service.Spec.Ports = []corev1.ServicePort{{ - Name: "http", - 
Port: servicePort, - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.FromInt(int(controlPlaneTargetPort)), - }} + return nil +} + +func (r *CoderControlPlaneReconciler) cleanupOwnedHTTPRoute(ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane) error { + if coderControlPlane == nil { + return fmt.Errorf("assertion failed: coder control plane must not be nil") + } + + httpRoute := &gatewayv1.HTTPRoute{} + namespacedName := types.NamespacedName{Name: coderControlPlane.Name, Namespace: coderControlPlane.Namespace} + err := r.Get(ctx, namespacedName, httpRoute) + switch { + case err == nil: + case apierrors.IsNotFound(err), meta.IsNoMatchError(err): return nil - }) - if err != nil { - return nil, fmt.Errorf("reconcile control plane service: %w", err) + default: + return fmt.Errorf("get control plane httproute %s: %w", namespacedName, err) } - // Avoid an immediate cached read-after-write here; cache propagation lag can - // transiently return NotFound for just-created objects and produce noisy reconcile errors. 
- return service, nil + if !isOwnedByCoderControlPlane(httpRoute, coderControlPlane) { + return nil + } + + if err := r.Delete(ctx, httpRoute); err != nil && !apierrors.IsNotFound(err) && !meta.IsNoMatchError(err) { + return fmt.Errorf("delete control plane httproute %s: %w", namespacedName, err) + } + + return nil +} + +func isOwnedByCoderControlPlane(object metav1.Object, coderControlPlane *coderv1alpha1.CoderControlPlane) bool { + if object == nil || coderControlPlane == nil { + return false + } + + ownerReference := metav1.GetControllerOf(object) + if ownerReference == nil { + return false + } + + return ownerReference.APIVersion == coderv1alpha1.GroupVersion.String() && + ownerReference.Kind == "CoderControlPlane" && + ownerReference.Name == coderControlPlane.Name && + ownerReference.UID == coderControlPlane.UID } func (r *CoderControlPlaneReconciler) desiredStatus( @@ -359,14 +1569,36 @@ func (r *CoderControlPlaneReconciler) desiredStatus( phase = coderv1alpha1.CoderControlPlanePhaseReady } + scheme := "http" + statusPort := servicePort + if controlPlaneTLSEnabled(coderControlPlane) { + scheme = "https" + statusPort = 443 + } + nextStatus.ObservedGeneration = coderControlPlane.Generation nextStatus.ReadyReplicas = deployment.Status.ReadyReplicas - nextStatus.URL = fmt.Sprintf("http://%s.%s.svc.cluster.local:%d", service.Name, service.Namespace, servicePort) + nextStatus.URL = fmt.Sprintf("%s://%s.%s.svc.cluster.local:%d", scheme, service.Name, service.Namespace, statusPort) nextStatus.Phase = phase return nextStatus } +func controlPlaneSDKURL(coderControlPlane *coderv1alpha1.CoderControlPlane) string { + if coderControlPlane == nil { + return "" + } + + // Always use HTTP for in-cluster SDK calls. TLS certs are typically provisioned + // for external hostnames and may fail verification against *.svc.cluster.local. 
+ servicePort, err := httpRouteBackendServicePort(coderControlPlane) + if err != nil { + return "" + } + + return fmt.Sprintf("http://%s.%s.svc.cluster.local:%d", coderControlPlane.Name, coderControlPlane.Namespace, servicePort) +} + func (r *CoderControlPlaneReconciler) reconcileOperatorAccess( ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane, @@ -527,8 +1759,9 @@ func (r *CoderControlPlaneReconciler) reconcileLicense( return ctrl.Result{}, nil } - if strings.TrimSpace(nextStatus.URL) == "" { - return ctrl.Result{}, fmt.Errorf("assertion failed: control plane URL must not be empty when licenseSecretRef is configured") + controlPlaneURL := controlPlaneSDKURL(coderControlPlane) + if strings.TrimSpace(controlPlaneURL) == "" { + return ctrl.Result{}, fmt.Errorf("assertion failed: control plane SDK URL must not be empty when licenseSecretRef is configured") } operatorTokenSecretName := strings.TrimSpace(nextStatus.OperatorTokenSecretRef.Name) @@ -628,7 +1861,7 @@ func (r *CoderControlPlaneReconciler) reconcileLicense( } if nextStatus.LicenseLastApplied != nil && nextStatus.LicenseLastAppliedHash == licenseHash { - hasAnyLicense, hasLicenseErr := r.LicenseUploader.HasAnyLicense(ctx, nextStatus.URL, operatorToken) + hasAnyLicense, hasLicenseErr := r.LicenseUploader.HasAnyLicense(ctx, controlPlaneURL, operatorToken) if hasLicenseErr != nil { var sdkErr *codersdk.Error if errors.As(hasLicenseErr, &sdkErr) { @@ -686,7 +1919,7 @@ func (r *CoderControlPlaneReconciler) reconcileLicense( } } - if err := r.LicenseUploader.AddLicense(ctx, nextStatus.URL, operatorToken, licenseJWT); err != nil { + if err := r.LicenseUploader.AddLicense(ctx, controlPlaneURL, operatorToken, licenseJWT); err != nil { if isDuplicateLicenseUploadError(err) { now := metav1.Now() nextStatus.LicenseLastApplied = &now @@ -787,8 +2020,9 @@ func (r *CoderControlPlaneReconciler) reconcileEntitlements( nextStatus.OperatorTokenSecretRef == nil { return ctrl.Result{}, nil } - if 
strings.TrimSpace(nextStatus.URL) == "" { - return ctrl.Result{}, fmt.Errorf("assertion failed: control plane URL must not be empty when querying entitlements") + controlPlaneURL := controlPlaneSDKURL(coderControlPlane) + if strings.TrimSpace(controlPlaneURL) == "" { + return ctrl.Result{}, fmt.Errorf("assertion failed: control plane SDK URL must not be empty when querying entitlements") } if r.EntitlementsInspector == nil { return ctrl.Result{}, nil @@ -812,7 +2046,7 @@ func (r *CoderControlPlaneReconciler) reconcileEntitlements( return ctrl.Result{RequeueAfter: operatorAccessRetryInterval}, nil } - entitlements, err := r.EntitlementsInspector.Entitlements(ctx, nextStatus.URL, operatorToken) + entitlements, err := r.EntitlementsInspector.Entitlements(ctx, controlPlaneURL, operatorToken) if err != nil { var sdkErr *codersdk.Error if errors.As(err, &sdkErr) { @@ -962,19 +2196,7 @@ func (r *CoderControlPlaneReconciler) cleanupDisabledOperatorAccess( } func isManagedOperatorTokenSecret(secret *corev1.Secret, coderControlPlane *coderv1alpha1.CoderControlPlane) bool { - if secret == nil || coderControlPlane == nil { - return false - } - - ownerReference := metav1.GetControllerOf(secret) - if ownerReference == nil { - return false - } - - return ownerReference.APIVersion == coderv1alpha1.GroupVersion.String() && - ownerReference.Kind == "CoderControlPlane" && - ownerReference.Name == coderControlPlane.Name && - ownerReference.UID == coderControlPlane.UID + return isOwnedByCoderControlPlane(secret, coderControlPlane) } func (r *CoderControlPlaneReconciler) resolvePostgresURLFromExtraEnv( @@ -1015,6 +2237,92 @@ func (r *CoderControlPlaneReconciler) resolvePostgresURLFromExtraEnv( return r.readSecretValue(ctx, coderControlPlane.Namespace, secretRef.Name, secretRef.Key) } +func (r *CoderControlPlaneReconciler) envFromDefinesEnvVar( + ctx context.Context, + namespace string, + envFromSources []corev1.EnvFromSource, + envVarName string, +) (bool, error) { + if 
strings.TrimSpace(namespace) == "" { + return false, fmt.Errorf("assertion failed: namespace must not be empty") + } + if strings.TrimSpace(envVarName) == "" { + return false, fmt.Errorf("assertion failed: environment variable name must not be empty") + } + + var reader client.Reader = r.Client + if r.APIReader != nil { + reader = r.APIReader + } + if reader == nil { + return false, fmt.Errorf("assertion failed: reader must not be nil") + } + + for i := range envFromSources { + envFromSource := envFromSources[i] + lookupKey, includeSource, err := envFromLookupKeyForEnvVar(envFromSource.Prefix, envVarName) + if err != nil { + return false, err + } + if !includeSource { + continue + } + + if envFromSource.ConfigMapRef != nil { + configMapName := strings.TrimSpace(envFromSource.ConfigMapRef.Name) + if configMapName == "" { + return false, fmt.Errorf("assertion failed: envFrom[%d].configMapRef.name must not be empty", i) + } + + configMap := &corev1.ConfigMap{} + err := reader.Get(ctx, types.NamespacedName{Namespace: namespace, Name: configMapName}, configMap) + if err != nil { + if !apierrors.IsNotFound(err) { + return false, fmt.Errorf("get envFrom[%d] configmap %s/%s: %w", i, namespace, configMapName, err) + } + } else if _, ok := configMap.Data[lookupKey]; ok { + return true, nil + } + } + + if envFromSource.SecretRef != nil { + secretName := strings.TrimSpace(envFromSource.SecretRef.Name) + if secretName == "" { + return false, fmt.Errorf("assertion failed: envFrom[%d].secretRef.name must not be empty", i) + } + + secret := &corev1.Secret{} + err := reader.Get(ctx, types.NamespacedName{Namespace: namespace, Name: secretName}, secret) + if err != nil { + if !apierrors.IsNotFound(err) { + return false, fmt.Errorf("get envFrom[%d] secret %s/%s: %w", i, namespace, secretName, err) + } + } else if _, ok := secret.Data[lookupKey]; ok { + return true, nil + } + } + } + + return false, nil +} + +func envFromLookupKeyForEnvVar(prefix, envVarName string) (string, bool, 
error) { + prefix = strings.TrimSpace(prefix) + if prefix == "" { + return envVarName, true, nil + } + if !strings.HasPrefix(envVarName, prefix) { + return "", false, nil + } + + lookupKey := strings.TrimPrefix(envVarName, prefix) + if strings.TrimSpace(lookupKey) == "" { + return "", false, nil + } + + return lookupKey, true, nil +} + func findEnvVar(envVars []corev1.EnvVar, name string) (*corev1.EnvVar, error) { if strings.TrimSpace(name) == "" { return nil, fmt.Errorf("assertion failed: environment variable name must not be empty") @@ -1075,6 +2383,65 @@ func operatorAccessTokenSecretName(coderControlPlane *coderv1alpha1.CoderControl return fmt.Sprintf("%s-%s%s", coderControlPlane.Name[:available], hashSuffix, operatorTokenSecretSuffix) } +func volumeNameForSecret(prefix, secretName string) string { + normalizedSecretName := strings.TrimSpace(strings.ToLower(secretName)) + sanitizedSecretName := sanitizeDNSLabel(normalizedSecretName) + candidate := fmt.Sprintf("%s-%s", prefix, sanitizedSecretName) + if len(candidate) <= 63 && sanitizedSecretName == normalizedSecretName { + return candidate + } + + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(prefix)) + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte(secretName)) + hashSuffix := fmt.Sprintf("%08x", hasher.Sum32()) + + available := 63 - len(prefix) - len(hashSuffix) - 2 + if available < 1 { + available = 1 + } + if len(sanitizedSecretName) > available { + sanitizedSecretName = sanitizedSecretName[:available] + sanitizedSecretName = strings.Trim(sanitizedSecretName, "-") + if sanitizedSecretName == "" { + sanitizedSecretName = "x" + } + } + + return fmt.Sprintf("%s-%s-%s", prefix, sanitizedSecretName, hashSuffix) +} + +func sanitizeDNSLabel(value string) string { + value = strings.TrimSpace(strings.ToLower(value)) + if value == "" { + return "x" + } + + builder := strings.Builder{} + builder.Grow(len(value)) + lastWasDash := false + for i := 0; i < len(value); i++ { + char := value[i] + if (char 
>= 'a' && char <= 'z') || (char >= '0' && char <= '9') { + builder.WriteByte(char) + lastWasDash = false + continue + } + if !lastWasDash { + builder.WriteByte('-') + lastWasDash = true + } + } + + sanitized := strings.Trim(builder.String(), "-") + if sanitized == "" { + return "x" + } + + return sanitized +} + func (r *CoderControlPlaneReconciler) ensureOperatorTokenSecret( ctx context.Context, coderControlPlane *coderv1alpha1.CoderControlPlane, @@ -1207,7 +2574,73 @@ func indexByLicenseSecretName(obj client.Object) []string { return []string{licenseSecretName} } -func (r *CoderControlPlaneReconciler) reconcileRequestsForLicenseSecret( +func indexByEnvFromConfigMapName(obj client.Object) []string { + coderControlPlane, ok := obj.(*coderv1alpha1.CoderControlPlane) + if !ok { + return nil + } + + configMapNames := map[string]struct{}{} + for i := range coderControlPlane.Spec.EnvFrom { + configMapRef := coderControlPlane.Spec.EnvFrom[i].ConfigMapRef + if configMapRef == nil { + continue + } + configMapName := strings.TrimSpace(configMapRef.Name) + if configMapName == "" { + continue + } + configMapNames[configMapName] = struct{}{} + } + + indexedNames := make([]string, 0, len(configMapNames)) + for configMapName := range configMapNames { + indexedNames = append(indexedNames, configMapName) + } + + return indexedNames +} + +func indexByEnvFromSecretName(obj client.Object) []string { + coderControlPlane, ok := obj.(*coderv1alpha1.CoderControlPlane) + if !ok { + return nil + } + + secretNames := map[string]struct{}{} + for i := range coderControlPlane.Spec.EnvFrom { + secretRef := coderControlPlane.Spec.EnvFrom[i].SecretRef + if secretRef == nil { + continue + } + secretName := strings.TrimSpace(secretRef.Name) + if secretName == "" { + continue + } + secretNames[secretName] = struct{}{} + } + + indexedNames := make([]string, 0, len(secretNames)) + for secretName := range secretNames { + indexedNames = append(indexedNames, secretName) + } + + return indexedNames +} + 
+func (r *CoderControlPlaneReconciler) reconcileRequestsForEnvFromConfigMap( + ctx context.Context, + obj client.Object, +) []reconcile.Request { + configMap, ok := obj.(*corev1.ConfigMap) + if !ok { + return nil + } + + return r.reconcileRequestsForIndexedControlPlanes(ctx, configMap.Namespace, envFromConfigMapNameFieldIndex, configMap.Name) +} + +func (r *CoderControlPlaneReconciler) reconcileRequestsForEnvFromSecret( ctx context.Context, obj client.Object, ) []reconcile.Request { @@ -1215,7 +2648,45 @@ func (r *CoderControlPlaneReconciler) reconcileRequestsForLicenseSecret( if !ok { return nil } - if strings.TrimSpace(secret.Name) == "" || strings.TrimSpace(secret.Namespace) == "" { + + return r.reconcileRequestsForIndexedControlPlanes(ctx, secret.Namespace, envFromSecretNameFieldIndex, secret.Name) +} + +func mergeReconcileRequests(requestGroups ...[]reconcile.Request) []reconcile.Request { + if len(requestGroups) == 0 { + return nil + } + + seen := map[string]struct{}{} + merged := make([]reconcile.Request, 0) + for i := range requestGroups { + for j := range requestGroups[i] { + request := requestGroups[i][j] + if strings.TrimSpace(request.Name) == "" || strings.TrimSpace(request.Namespace) == "" { + continue + } + key := namespacedResourceKey(request.Namespace, request.Name) + if _, exists := seen[key]; exists { + continue + } + seen[key] = struct{}{} + merged = append(merged, request) + } + } + + return merged +} + +func (r *CoderControlPlaneReconciler) reconcileRequestsForIndexedControlPlanes( + ctx context.Context, + namespace string, + indexField string, + indexValue string, +) []reconcile.Request { + namespace = strings.TrimSpace(namespace) + indexField = strings.TrimSpace(indexField) + indexValue = strings.TrimSpace(indexValue) + if namespace == "" || indexField == "" || indexValue == "" { return nil } @@ -1223,14 +2694,15 @@ func (r *CoderControlPlaneReconciler) reconcileRequestsForLicenseSecret( if err := r.List( ctx, &coderControlPlanes, - 
client.InNamespace(secret.Namespace), - client.MatchingFields{licenseSecretNameFieldIndex: secret.Name}, + client.InNamespace(namespace), + client.MatchingFields{indexField: indexValue}, ); err != nil { return nil } requests := make([]reconcile.Request, 0, len(coderControlPlanes.Items)) - for _, coderControlPlane := range coderControlPlanes.Items { + for i := range coderControlPlanes.Items { + coderControlPlane := coderControlPlanes.Items[i] if strings.TrimSpace(coderControlPlane.Name) == "" || strings.TrimSpace(coderControlPlane.Namespace) == "" { continue } @@ -1243,6 +2715,29 @@ func (r *CoderControlPlaneReconciler) reconcileRequestsForLicenseSecret( return requests } +func (r *CoderControlPlaneReconciler) reconcileRequestsForLicenseSecret( + ctx context.Context, + obj client.Object, +) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + if strings.TrimSpace(secret.Name) == "" || strings.TrimSpace(secret.Namespace) == "" { + return nil + } + + licenseSecretRequests := r.reconcileRequestsForIndexedControlPlanes( + ctx, + secret.Namespace, + licenseSecretNameFieldIndex, + secret.Name, + ) + envFromSecretRequests := r.reconcileRequestsForEnvFromSecret(ctx, secret) + + return mergeReconcileRequests(licenseSecretRequests, envFromSecretRequests) +} + func isDuplicateLicenseUploadError(err error) bool { var sdkErr *codersdk.Error if !errors.As(err, &sdkErr) { @@ -1414,16 +2909,50 @@ func (r *CoderControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error { ); err != nil { return fmt.Errorf("index coder control planes by license secret name: %w", err) } + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &coderv1alpha1.CoderControlPlane{}, + envFromConfigMapNameFieldIndex, + indexByEnvFromConfigMapName, + ); err != nil { + return fmt.Errorf("index coder control planes by envFrom ConfigMap name: %w", err) + } + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + 
&coderv1alpha1.CoderControlPlane{}, + envFromSecretNameFieldIndex, + indexByEnvFromSecretName, + ); err != nil { + return fmt.Errorf("index coder control planes by envFrom Secret name: %w", err) + } - return ctrl.NewControllerManagedBy(mgr). + builder := ctrl.NewControllerManagedBy(mgr). For(&coderv1alpha1.CoderControlPlane{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Service{}). + Owns(&networkingv1.Ingress{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). Owns(&corev1.Secret{}). Watches( &corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(r.reconcileRequestsForLicenseSecret), ). + Watches( + &corev1.ConfigMap{}, + handler.EnqueueRequestsFromMapFunc(r.reconcileRequestsForEnvFromConfigMap), + ) + + // Gateway API is optional; only watch HTTPRoutes when the CRD is installed. + httpRouteGVK := schema.GroupVersionKind{Group: gatewayv1.GroupVersion.Group, Version: gatewayv1.GroupVersion.Version, Kind: "HTTPRoute"} + if _, err := mgr.GetRESTMapper().RESTMapping(httpRouteGVK.GroupKind(), httpRouteGVK.Version); err == nil { + builder = builder.Owns(&gatewayv1.HTTPRoute{}) + } else if !meta.IsNoMatchError(err) { + return fmt.Errorf("check HTTPRoute REST mapping: %w", err) + } + + return builder. Named("codercontrolplane"). 
Complete(r) } diff --git a/internal/controller/codercontrolplane_controller_test.go b/internal/controller/codercontrolplane_controller_test.go index 69d67986..f9de3c1d 100644 --- a/internal/controller/codercontrolplane_controller_test.go +++ b/internal/controller/codercontrolplane_controller_test.go @@ -3,19 +3,33 @@ package controller_test import ( "context" "errors" + "fmt" + "hash/fnv" "net/http" "reflect" "strings" + "sync" "testing" "time" "github.com/coder/coder/v2/codersdk" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" coderv1alpha1 "github.com/coder/coder-k8s/api/v1alpha1" "github.com/coder/coder-k8s/internal/coderbootstrap" @@ -110,6 +124,7 @@ func (f *fakeEntitlementsInspector) Entitlements(_ context.Context, coderURL, se } func TestReconcile_NotFound(t *testing.T) { + ensureGatewaySchemeRegistered(t) r := &controller.CoderControlPlaneReconciler{ Client: k8sClient, Scheme: scheme, @@ -130,6 +145,7 @@ func TestReconcile_NotFound(t *testing.T) { } func TestReconcile_ExistingResource(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() replicas := int32(2) @@ -185,7 +201,7 @@ func TestReconcile_ExistingResource(t *testing.T) { t.Fatalf("expected one container in deployment pod spec, got %d", 
len(deployment.Spec.Template.Spec.Containers)) } container := deployment.Spec.Template.Spec.Containers[0] - expectedArgs := []string{"--http-address=0.0.0.0:3000", "--prometheus-enable=false"} + expectedArgs := []string{"--http-address=0.0.0.0:8080", "--prometheus-enable=false"} if !reflect.DeepEqual(container.Args, expectedArgs) { t.Fatalf("expected container args %v, got %v", expectedArgs, container.Args) } @@ -200,6 +216,7 @@ func TestReconcile_ExistingResource(t *testing.T) { } func TestReconcile_StatusPersistence(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() replicas := int32(1) @@ -250,6 +267,7 @@ func TestReconcile_StatusPersistence(t *testing.T) { } func TestReconcile_OwnerReferences(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -288,6 +306,7 @@ func TestReconcile_OwnerReferences(t *testing.T) { } func TestReconcile_SpecUpdatePropagates(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() initialReplicas := int32(1) @@ -358,6 +377,7 @@ func TestReconcile_SpecUpdatePropagates(t *testing.T) { } func TestReconcile_PhaseTransitionToReady(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -417,6 +437,7 @@ func TestReconcile_PhaseTransitionToReady(t *testing.T) { } func TestReconcile_LicenseSecretRefNil_DoesNotUpload(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -481,6 +502,7 @@ func TestReconcile_LicenseSecretRefNil_DoesNotUpload(t *testing.T) { } func TestReconcile_LicensePendingUntilControlPlaneReady(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() licenseSecret := &corev1.Secret{ @@ -543,6 +565,7 @@ func TestReconcile_LicensePendingUntilControlPlaneReady(t *testing.T) { } func TestReconcile_LicenseAppliesOnceAndTracksHash(t *testing.T) { + 
ensureGatewaySchemeRegistered(t) ctx := context.Background() licenseSecret := &corev1.Secret{ @@ -636,7 +659,165 @@ func TestReconcile_LicenseAppliesOnceAndTracksHash(t *testing.T) { } } +func TestReconcile_LicenseUsesInternalHTTPURLWhenTLSEnabled(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + licenseSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test-license-tls-internal-url-secret", Namespace: "default"}, + Data: map[string][]byte{ + coderv1alpha1.DefaultLicenseSecretKey: []byte("license-jwt-tls-internal-url"), + }, + } + if err := k8sClient.Create(ctx, licenseSecret); err != nil { + t.Fatalf("create license secret: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, licenseSecret) + }) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-license-tls-internal-url", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + ExtraEnv: []corev1.EnvVar{{ + Name: "CODER_PG_CONNECTION_URL", + Value: "postgres://example/license-tls-internal-url", + }}, + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"coder-internal-tls-secret"}, + }, + LicenseSecretRef: &coderv1alpha1.SecretKeySelector{Name: licenseSecret.Name}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create test CoderControlPlane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + provisioner := &fakeOperatorAccessProvisioner{token: "operator-token-license-tls-url"} + uploader := &fakeLicenseUploader{} + r := &controller.CoderControlPlaneReconciler{ + Client: k8sClient, + Scheme: scheme, + OperatorAccessProvisioner: provisioner, + LicenseUploader: uploader, + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("first reconcile control plane: %v", err) + } + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, 
types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get reconciled deployment: %v", err) + } + deployment.Status.ReadyReplicas = 1 + deployment.Status.Replicas = 1 + if err := k8sClient.Status().Update(ctx, deployment); err != nil { + t.Fatalf("update deployment status: %v", err) + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("second reconcile control plane: %v", err) + } + if len(uploader.calls) != 1 { + t.Fatalf("expected one license upload call, got %d", len(uploader.calls)) + } + if got := uploader.calls[0].coderURL; got != "http://test-license-tls-internal-url.default.svc.cluster.local:80" { + t.Fatalf("expected license upload URL %q, got %q", "http://test-license-tls-internal-url.default.svc.cluster.local:80", got) + } + + reconciled := &coderv1alpha1.CoderControlPlane{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, reconciled); err != nil { + t.Fatalf("get reconciled control plane: %v", err) + } + if !strings.HasPrefix(reconciled.Status.URL, "https://") { + t.Fatalf("expected status URL to remain https when TLS is enabled, got %q", reconciled.Status.URL) + } +} + +func TestReconcile_LicenseUsesInternalHTTPURLWhenTLSAndServicePort443(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + licenseSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test-license-tls-443-url-secret", Namespace: "default"}, + Data: map[string][]byte{ + coderv1alpha1.DefaultLicenseSecretKey: []byte("license-jwt-tls-443-url"), + }, + } + if err := k8sClient.Create(ctx, licenseSecret); err != nil { + t.Fatalf("create license secret: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, licenseSecret) + }) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-license-tls-443-url", Namespace: 
"default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + ExtraEnv: []corev1.EnvVar{{ + Name: "CODER_PG_CONNECTION_URL", + Value: "postgres://example/license-tls-443-url", + }}, + Service: coderv1alpha1.ServiceSpec{Port: 443}, + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"coder-internal-tls-443-secret"}, + }, + LicenseSecretRef: &coderv1alpha1.SecretKeySelector{Name: licenseSecret.Name}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create test CoderControlPlane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + provisioner := &fakeOperatorAccessProvisioner{token: "operator-token-license-tls-443-url"} + uploader := &fakeLicenseUploader{} + r := &controller.CoderControlPlaneReconciler{ + Client: k8sClient, + Scheme: scheme, + OperatorAccessProvisioner: provisioner, + LicenseUploader: uploader, + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("first reconcile control plane: %v", err) + } + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get reconciled deployment: %v", err) + } + deployment.Status.ReadyReplicas = 1 + deployment.Status.Replicas = 1 + if err := k8sClient.Status().Update(ctx, deployment); err != nil { + t.Fatalf("update deployment status: %v", err) + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("second reconcile control plane: %v", err) + } + if len(uploader.calls) != 1 { + t.Fatalf("expected one license upload call, got %d", len(uploader.calls)) + } + if got := uploader.calls[0].coderURL; got != "http://test-license-tls-443-url.default.svc.cluster.local:80" { + t.Fatalf("expected license upload URL %q, got %q", 
"http://test-license-tls-443-url.default.svc.cluster.local:80", got) + } + + reconciled := &coderv1alpha1.CoderControlPlane{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, reconciled); err != nil { + t.Fatalf("get reconciled control plane: %v", err) + } + if !strings.HasPrefix(reconciled.Status.URL, "https://") { + t.Fatalf("expected status URL to remain https when TLS is enabled, got %q", reconciled.Status.URL) + } +} + func TestReconcile_LicenseReuploadsWhenBackendHasNoLicenses(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() licenseSecret := &corev1.Secret{ @@ -712,6 +893,7 @@ func TestReconcile_LicenseReuploadsWhenBackendHasNoLicenses(t *testing.T) { } func TestReconcile_LicenseRotationUploadsNewSecretValue(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() licenseSecret := &corev1.Secret{ @@ -811,6 +993,7 @@ func TestReconcile_LicenseRotationUploadsNewSecretValue(t *testing.T) { } func TestReconcile_LicenseRollbackDuplicateUploadConverges(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() licenseSecret := &corev1.Secret{ @@ -937,6 +1120,7 @@ func TestReconcile_LicenseRollbackDuplicateUploadConverges(t *testing.T) { } func TestReconcile_LicenseNotSupportedSetsConditionWithoutRequeue(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() licenseSecret := &corev1.Secret{ @@ -1022,6 +1206,7 @@ func TestReconcile_LicenseNotSupportedSetsConditionWithoutRequeue(t *testing.T) } func TestReconcile_DefaultsApplied(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1071,6 +1256,7 @@ func TestReconcile_DefaultsApplied(t *testing.T) { } func TestReconcile_DefaultOperatorAccess_MissingPostgresURL(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1122,6 +1308,7 @@ func 
TestReconcile_DefaultOperatorAccess_MissingPostgresURL(t *testing.T) { } func TestReconcile_OperatorAccess_Disabled(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1179,6 +1366,7 @@ func TestReconcile_OperatorAccess_Disabled(t *testing.T) { } func TestReconcile_OperatorAccess_Disabled_DoesNotDeleteUnmanagedSecret(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1248,6 +1436,7 @@ func TestReconcile_OperatorAccess_Disabled_DoesNotDeleteUnmanagedSecret(t *testi } func TestReconcile_OperatorAccess_Disabled_RevokesWithoutStatusOrManagedSecret(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1304,6 +1493,7 @@ func TestReconcile_OperatorAccess_Disabled_RevokesWithoutStatusOrManagedSecret(t } func TestReconcile_OperatorAccess_Disabled_RevokesTokenAndDeletesSecret(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1396,6 +1586,7 @@ func TestReconcile_OperatorAccess_Disabled_RevokesTokenAndDeletesSecret(t *testi } func TestReconcile_OperatorAccess_Disabled_RetriesRevocationAfterFailure(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1494,6 +1685,7 @@ func TestReconcile_OperatorAccess_Disabled_RetriesRevocationAfterFailure(t *test } func TestReconcile_OperatorAccess_MalformedManagedSecret_ReprovisionsToken(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1558,6 +1750,7 @@ func TestReconcile_OperatorAccess_MalformedManagedSecret_ReprovisionsToken(t *te } func TestReconcile_OperatorAccess_UsesDistinctTokenNamesPerControlPlane(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp1 := 
&coderv1alpha1.CoderControlPlane{ @@ -1624,6 +1817,7 @@ func TestReconcile_OperatorAccess_UsesDistinctTokenNamesPerControlPlane(t *testi } func TestReconcile_OperatorAccess_ResolvesLiteralPostgresURLAndCreatesTokenSecret(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() cp := &coderv1alpha1.CoderControlPlane{ @@ -1711,6 +1905,7 @@ func TestReconcile_OperatorAccess_ResolvesLiteralPostgresURLAndCreatesTokenSecre } func TestReconcile_OperatorAccess_ResolvesPostgresURLFromSecretRef(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() postgresURLSecret := &corev1.Secret{ @@ -1799,6 +1994,7 @@ func TestReconcile_OperatorAccess_ResolvesPostgresURLFromSecretRef(t *testing.T) } func TestReconcile_EntitlementsStatusFields(t *testing.T) { + ensureGatewaySchemeRegistered(t) ctx := context.Background() testCases := []struct { @@ -1946,6 +2142,2638 @@ func TestReconcile_EntitlementsStatusFields(t *testing.T) { } } +func TestReconcile_ServiceAccount(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + testCases := []struct { + name string + controlPlaneName string + serviceAccount coderv1alpha1.ServiceAccountSpec + expectedName string + expectCreated bool + expectedLabels map[string]string + expectedAnnotations map[string]string + }{ + { + name: "DefaultName", + controlPlaneName: "test-serviceaccount-default", + expectedName: "test-serviceaccount-default", + expectCreated: true, + }, + { + name: "CustomName", + controlPlaneName: "test-serviceaccount-custom", + serviceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "custom-service-account", + }, + expectedName: "custom-service-account", + expectCreated: true, + }, + { + name: "CustomLabelsAndAnnotations", + controlPlaneName: "test-serviceaccount-metadata", + serviceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "test-serviceaccount-metadata-sa", + Labels: map[string]string{ + "custom-label": "label-value", + }, + Annotations: 
map[string]string{ + "custom-annotation": "annotation-value", + }, + }, + expectedName: "test-serviceaccount-metadata-sa", + expectCreated: true, + expectedLabels: map[string]string{ + "custom-label": "label-value", + }, + expectedAnnotations: map[string]string{ + "custom-annotation": "annotation-value", + }, + }, + { + name: "CreationDisabled", + controlPlaneName: "test-serviceaccount-disabled", + serviceAccount: coderv1alpha1.ServiceAccountSpec{ + DisableCreate: true, + }, + expectedName: "test-serviceaccount-disabled", + expectCreated: false, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: testCase.controlPlaneName, Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-serviceaccount:latest", + ServiceAccount: testCase.serviceAccount, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + serviceAccount := &corev1.ServiceAccount{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: testCase.expectedName, Namespace: cp.Namespace}, serviceAccount) + if !testCase.expectCreated { + if !apierrors.IsNotFound(err) { + t.Fatalf("expected service account %s/%s to be absent, got error: %v", cp.Namespace, testCase.expectedName, err) + } + return + } + if err != nil { + t.Fatalf("get service account: %v", err) + } + + if serviceAccount.Name != testCase.expectedName { + t.Fatalf("expected service account name %q, got %q", testCase.expectedName, serviceAccount.Name) + } + if 
serviceAccount.Labels["app.kubernetes.io/name"] != "coder-control-plane" { + t.Fatalf("expected managed label app.kubernetes.io/name=coder-control-plane, got %q", serviceAccount.Labels["app.kubernetes.io/name"]) + } + if serviceAccount.Labels["app.kubernetes.io/instance"] != cp.Name { + t.Fatalf("expected managed label app.kubernetes.io/instance=%q, got %q", cp.Name, serviceAccount.Labels["app.kubernetes.io/instance"]) + } + if serviceAccount.Labels["app.kubernetes.io/managed-by"] != "coder-k8s" { + t.Fatalf("expected managed label app.kubernetes.io/managed-by=coder-k8s, got %q", serviceAccount.Labels["app.kubernetes.io/managed-by"]) + } + for key, value := range testCase.expectedLabels { + if serviceAccount.Labels[key] != value { + t.Fatalf("expected service account label %q=%q, got %q", key, value, serviceAccount.Labels[key]) + } + } + for key, value := range testCase.expectedAnnotations { + if serviceAccount.Annotations[key] != value { + t.Fatalf("expected service account annotation %q=%q, got %q", key, value, serviceAccount.Annotations[key]) + } + } + }) + } + + t.Run("DisableCreateDetachesManagedServiceAccount", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-serviceaccount-disable-detach", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-serviceaccount:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "test-serviceaccount-disable-detach-sa", + Labels: map[string]string{ + "app.kubernetes.io/instance": "custom-instance-label", + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil 
{ + t.Fatalf("reconcile control plane before disabling service account creation: %v", err) + } + + serviceAccountName := cp.Spec.ServiceAccount.Name + serviceAccount := &corev1.ServiceAccount{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: cp.Namespace}, serviceAccount); err != nil { + t.Fatalf("get managed service account: %v", err) + } + if got := serviceAccount.Labels["app.kubernetes.io/instance"]; got != "custom-instance-label" { + t.Fatalf("expected reserved instance label override to be applied, got %q", got) + } + ownerReference := metav1.GetControllerOf(serviceAccount) + if ownerReference == nil || ownerReference.UID != cp.UID { + t.Fatalf("expected service account to be controller-owned before disableCreate=true, got %#v", ownerReference) + } + + latest := &coderv1alpha1.CoderControlPlane{} + if err := k8sClient.Get(ctx, namespacedName, latest); err != nil { + t.Fatalf("get latest control plane for disableCreate update: %v", err) + } + latest.Spec.ServiceAccount.DisableCreate = true + if err := k8sClient.Update(ctx, latest); err != nil { + t.Fatalf("update control plane to disable service account creation: %v", err) + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane after disabling service account creation: %v", err) + } + + serviceAccount = &corev1.ServiceAccount{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: cp.Namespace}, serviceAccount); err != nil { + t.Fatalf("get service account after disableCreate=true: %v", err) + } + if ownerReference := metav1.GetControllerOf(serviceAccount); ownerReference != nil { + t.Fatalf("expected service account controller reference to be removed when disableCreate=true, got %#v", ownerReference) + } + }) +} + +func TestReconcile_WorkspaceRBAC(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + 
t.Run("RoleAndRoleBindingCreated", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-workspace-rbac-default", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-workspace-rbac:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "test-workspace-rbac-default-sa", + }, + RBAC: coderv1alpha1.RBACSpec{ + WorkspacePerms: ptrTo(true), + EnableDeployments: ptrTo(true), + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + serviceAccountName := cp.Spec.ServiceAccount.Name + roleName := expectedWorkspaceRoleName(t, cp, serviceAccountName) + roleBindingName := expectedWorkspaceRoleBindingName(t, cp, serviceAccountName) + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: cp.Namespace}, role); err != nil { + t.Fatalf("get workspace role: %v", err) + } + if !roleContainsRuleForResource(role.Rules, "", "pods") { + t.Fatal("expected workspace role to include pods permissions") + } + if !roleContainsRuleForResource(role.Rules, "", "persistentvolumeclaims") { + t.Fatal("expected workspace role to include persistentvolumeclaims permissions") + } + if !roleContainsRuleForResource(role.Rules, "apps", "deployments") { + t.Fatal("expected workspace role to include deployments permissions") + } + + roleBinding := &rbacv1.RoleBinding{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: cp.Namespace}, roleBinding); err != nil { + t.Fatalf("get workspace role binding: %v", err) + } + if roleBinding.RoleRef.Kind != 
"Role" || roleBinding.RoleRef.Name != roleName { + t.Fatalf("expected role binding roleRef to Role %q, got %#v", roleName, roleBinding.RoleRef) + } + if len(roleBinding.Subjects) != 1 { + t.Fatalf("expected one role binding subject, got %d", len(roleBinding.Subjects)) + } + subject := roleBinding.Subjects[0] + if subject.Kind != rbacv1.ServiceAccountKind || subject.Name != serviceAccountName || subject.Namespace != cp.Namespace { + t.Fatalf("expected role binding service account subject %s/%s, got %#v", cp.Namespace, serviceAccountName, subject) + } + }) + + t.Run("LongServiceAccountNameKeepsRoleNameWithinKubernetesLimit", func(t *testing.T) { + serviceAccountName := strings.Repeat("a", 253) + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-workspace-rbac-long-role-name", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-workspace-rbac:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: serviceAccountName, + }, + RBAC: coderv1alpha1.RBACSpec{ + WorkspacePerms: ptrTo(true), + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane with long service account name: %v", err) + } + + roleBindingName := expectedWorkspaceRoleBindingName(t, cp, serviceAccountName) + roleBinding := &rbacv1.RoleBinding{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: cp.Namespace}, roleBinding); err != nil { + t.Fatalf("get workspace role binding: %v", err) + } + roleName := roleBinding.RoleRef.Name + expectedRoleName := expectedWorkspaceRoleName(t, cp, serviceAccountName) + if roleName != 
expectedRoleName { + t.Fatalf("expected workspace role name %q, got %q", expectedRoleName, roleName) + } + if len(roleName) > 253 { + t.Fatalf("expected workspace role name length <= 253, got %d (%q)", len(roleName), roleName) + } + if !strings.HasSuffix(roleName, "-workspace-perms") { + t.Fatalf("expected workspace role name to retain suffix %q, got %q", "-workspace-perms", roleName) + } + + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: cp.Namespace}, role); err != nil { + t.Fatalf("get truncated workspace role: %v", err) + } + if !roleContainsRuleForResource(role.Rules, "", "pods") { + t.Fatal("expected truncated workspace role to include pods permissions") + } + }) + + t.Run("DeploymentsRuleDisabled", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-workspace-rbac-no-deployments", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-workspace-rbac:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "test-workspace-rbac-no-deployments-sa", + }, + RBAC: coderv1alpha1.RBACSpec{ + WorkspacePerms: ptrTo(true), + EnableDeployments: ptrTo(false), + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + roleName := expectedWorkspaceRoleName(t, cp, cp.Spec.ServiceAccount.Name) + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: cp.Namespace}, role); err != nil { + t.Fatalf("get workspace role: %v", err) + } + if roleContainsRuleForResource(role.Rules, "apps", "deployments") { + 
t.Fatal("expected workspace role deployments permissions to be omitted when enableDeployments=false") + } + }) + + t.Run("RBACDisabled", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-workspace-rbac-disabled", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-workspace-rbac:latest", + RBAC: coderv1alpha1.RBACSpec{ + WorkspacePerms: ptrTo(false), + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + roleName := expectedWorkspaceRoleName(t, cp, cp.Name) + roleBindingName := expectedWorkspaceRoleBindingName(t, cp, cp.Name) + role := &rbacv1.Role{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: cp.Namespace}, role) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected workspace role to be absent when RBAC is disabled, got error: %v", err) + } + + roleBinding := &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: cp.Namespace}, roleBinding) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected workspace role binding to be absent when RBAC is disabled, got error: %v", err) + } + }) + + t.Run("RBACDisabledCleansPreviouslyManagedRoles", func(t *testing.T) { + workspaceNamespace := "workspace-rbac-cleanup-disabled" + namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: workspaceNamespace}} + if err := k8sClient.Create(ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + t.Fatalf("create workspace namespace: %v", err) + } + + serviceAccountName := 
"test-workspace-rbac-cleanup-disabled-sa" + cp := createCoderControlPlaneUnstructured(ctx, t, "test-workspace-rbac-cleanup-disabled", "default", map[string]any{ + "image": "test-workspace-rbac:latest", + "serviceAccount": map[string]any{ + "name": serviceAccountName, + }, + "rbac": map[string]any{ + "workspacePerms": true, + "enableDeployments": true, + "workspaceNamespaces": []any{workspaceNamespace}, + }, + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane before disable: %v", err) + } + + roleName := expectedWorkspaceRoleName(t, cp, serviceAccountName) + roleBindingName := expectedWorkspaceRoleBindingName(t, cp, serviceAccountName) + for _, namespaceName := range []string{cp.Namespace, workspaceNamespace} { + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: namespaceName}, role); err != nil { + t.Fatalf("expected workspace role %s/%s to exist before disabling RBAC: %v", namespaceName, roleName, err) + } + roleBinding := &rbacv1.RoleBinding{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: namespaceName}, roleBinding); err != nil { + t.Fatalf("expected workspace role binding %s/%s to exist before disabling RBAC: %v", namespaceName, roleBindingName, err) + } + } + + unstructuredCP := &unstructured.Unstructured{} + unstructuredCP.SetAPIVersion(coderv1alpha1.GroupVersion.String()) + unstructuredCP.SetKind("CoderControlPlane") + if err := k8sClient.Get(ctx, namespacedName, unstructuredCP); err != nil { + t.Fatalf("get unstructured control plane for RBAC disable update: %v", err) + } + if err := unstructured.SetNestedField(unstructuredCP.Object, false, "spec", "rbac", "workspacePerms"); err != nil { + t.Fatalf("set 
spec.rbac.workspacePerms=false: %v", err) + } + if err := k8sClient.Update(ctx, unstructuredCP); err != nil { + t.Fatalf("update control plane to disable workspace RBAC: %v", err) + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane after disable: %v", err) + } + + for _, namespaceName := range []string{cp.Namespace, workspaceNamespace} { + role := &rbacv1.Role{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: namespaceName}, role) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected workspace role %s/%s to be removed after disabling RBAC, got: %v", namespaceName, roleName, err) + } + roleBinding := &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: namespaceName}, roleBinding) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected workspace role binding %s/%s to be removed after disabling RBAC, got: %v", namespaceName, roleBindingName, err) + } + } + }) + + t.Run("DeleteControlPlaneCleansCrossNamespaceRBAC", func(t *testing.T) { + workspaceNamespace := "workspace-rbac-cleanup-delete" + namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: workspaceNamespace}} + if err := k8sClient.Create(ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + t.Fatalf("create workspace namespace: %v", err) + } + + serviceAccountName := "test-workspace-rbac-cleanup-delete-sa" + cp := createCoderControlPlaneUnstructured(ctx, t, "test-workspace-rbac-cleanup-delete", "default", map[string]any{ + "image": "test-workspace-rbac:latest", + "serviceAccount": map[string]any{ + "name": serviceAccountName, + }, + "rbac": map[string]any{ + "workspacePerms": true, + "enableDeployments": true, + "workspaceNamespaces": []any{workspaceNamespace}, + }, + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: 
cp.Namespace} + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}) + if err != nil { + t.Fatalf("reconcile control plane before delete: %v", err) + } + if result.RequeueAfter <= 0 { + t.Fatalf("expected cross-namespace workspace RBAC to request periodic drift requeue, got %+v", result) + } + + roleName := expectedWorkspaceRoleName(t, cp, serviceAccountName) + roleBindingName := expectedWorkspaceRoleBindingName(t, cp, serviceAccountName) + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: workspaceNamespace}, role); err != nil { + t.Fatalf("expected cross-namespace role %s/%s before delete: %v", workspaceNamespace, roleName, err) + } + roleBinding := &rbacv1.RoleBinding{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: workspaceNamespace}, roleBinding); err != nil { + t.Fatalf("expected cross-namespace role binding %s/%s before delete: %v", workspaceNamespace, roleBindingName, err) + } + + if err := k8sClient.Delete(ctx, cp); err != nil { + t.Fatalf("delete control plane: %v", err) + } + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane deletion: %v", err) + } + + role = &rbacv1.Role{} + err = k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: workspaceNamespace}, role) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected cross-namespace role %s/%s to be removed after control plane deletion, got: %v", workspaceNamespace, roleName, err) + } + roleBinding = &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: workspaceNamespace}, roleBinding) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected cross-namespace role binding %s/%s to be removed after control plane deletion, got: %v", workspaceNamespace, roleBindingName, err) + } + }) + + t.Run("DeleteControlPlaneWithWhitespaceServiceAccountNameStillFinalizes", 
func(t *testing.T) { + workspaceNamespace := "workspace-rbac-finalizer-invalid-sa" + namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: workspaceNamespace}} + if err := k8sClient.Create(ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + t.Fatalf("create workspace namespace: %v", err) + } + + serviceAccountName := "test-workspace-rbac-finalizer-invalid-sa" + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-workspace-rbac-finalizer-invalid-sa", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-workspace-rbac:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: serviceAccountName, + }, + RBAC: coderv1alpha1.RBACSpec{ + WorkspacePerms: ptrTo(true), + WorkspaceNamespaces: []string{workspaceNamespace}, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(context.Background(), cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane before invalidating service account name: %v", err) + } + + roleName := expectedWorkspaceRoleName(t, cp, serviceAccountName) + roleBindingName := expectedWorkspaceRoleBindingName(t, cp, serviceAccountName) + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: workspaceNamespace}, role); err != nil { + t.Fatalf("expected cross-namespace role %s/%s before delete: %v", workspaceNamespace, roleName, err) + } + roleBinding := &rbacv1.RoleBinding{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: workspaceNamespace}, roleBinding); err != nil { + t.Fatalf("expected cross-namespace role binding %s/%s before 
delete: %v", workspaceNamespace, roleBindingName, err) + } + + latest := &coderv1alpha1.CoderControlPlane{} + if err := k8sClient.Get(ctx, namespacedName, latest); err != nil { + t.Fatalf("get control plane before invalid service account update: %v", err) + } + latest.Spec.ServiceAccount.Name = " " + if err := k8sClient.Update(ctx, latest); err != nil { + t.Fatalf("update control plane with whitespace service account name: %v", err) + } + + if err := k8sClient.Delete(ctx, latest); err != nil { + t.Fatalf("delete control plane: %v", err) + } + + for i := 0; i < 3; i++ { + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane deletion with invalid service account name: %v", err) + } + + current := &coderv1alpha1.CoderControlPlane{} + err := k8sClient.Get(ctx, namespacedName, current) + if apierrors.IsNotFound(err) { + break + } + if err != nil { + t.Fatalf("get control plane after deletion reconcile: %v", err) + } + if i == 2 { + t.Fatalf("expected control plane to be finalized and deleted, finalizers=%v", current.Finalizers) + } + } + + role = &rbacv1.Role{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: workspaceNamespace}, role) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected cross-namespace role %s/%s to be removed after control plane deletion, got: %v", workspaceNamespace, roleName, err) + } + roleBinding = &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: workspaceNamespace}, roleBinding) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected cross-namespace role binding %s/%s to be removed after control plane deletion, got: %v", workspaceNamespace, roleBindingName, err) + } + }) + + t.Run("RBACCleanupPreservesUnmanagedLabeledResources", func(t *testing.T) { + workspaceNamespace := "workspace-rbac-preserve-unmanaged" + namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: 
workspaceNamespace}} + if err := k8sClient.Create(ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + t.Fatalf("create workspace namespace: %v", err) + } + + serviceAccountName := "test-workspace-rbac-preserve-unmanaged-sa" + cp := createCoderControlPlaneUnstructured(ctx, t, "test-workspace-rbac-preserve-unmanaged", "default", map[string]any{ + "image": "test-workspace-rbac:latest", + "serviceAccount": map[string]any{ + "name": serviceAccountName, + }, + "rbac": map[string]any{ + "workspacePerms": true, + "enableDeployments": true, + "workspaceNamespaces": []any{workspaceNamespace}, + }, + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane before creating unmanaged RBAC: %v", err) + } + + workspaceLabels := map[string]string{ + "app.kubernetes.io/name": "coder-control-plane", + "app.kubernetes.io/instance": cp.Name, + "app.kubernetes.io/managed-by": "coder-k8s", + "coder.com/control-plane": cp.Name, + "coder.com/control-plane-namespace": cp.Namespace, + } + manualRoleName := "manual-external-workspace-role" + manualRole := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: manualRoleName, + Namespace: workspaceNamespace, + Labels: workspaceLabels, + }, + Rules: []rbacv1.PolicyRule{{ + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get"}, + }}, + } + if err := k8sClient.Create(ctx, manualRole); err != nil { + t.Fatalf("create unmanaged role with matching labels: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(context.Background(), manualRole) + }) + + manualRoleBindingName := "manual-external-workspace-rolebinding" + manualRoleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: manualRoleBindingName, + Namespace: workspaceNamespace, + Labels: 
workspaceLabels, + }, + RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "Role", Name: manualRoleName}, + Subjects: []rbacv1.Subject{{ + Kind: rbacv1.ServiceAccountKind, + Name: serviceAccountName, + Namespace: cp.Namespace, + }}, + } + if err := k8sClient.Create(ctx, manualRoleBinding); err != nil { + t.Fatalf("create unmanaged role binding with matching labels: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(context.Background(), manualRoleBinding) + }) + + unstructuredCP := &unstructured.Unstructured{} + unstructuredCP.SetAPIVersion(coderv1alpha1.GroupVersion.String()) + unstructuredCP.SetKind("CoderControlPlane") + if err := k8sClient.Get(ctx, namespacedName, unstructuredCP); err != nil { + t.Fatalf("get unstructured control plane for RBAC disable update: %v", err) + } + if err := unstructured.SetNestedField(unstructuredCP.Object, false, "spec", "rbac", "workspacePerms"); err != nil { + t.Fatalf("set spec.rbac.workspacePerms=false: %v", err) + } + if err := k8sClient.Update(ctx, unstructuredCP); err != nil { + t.Fatalf("update control plane to disable workspace RBAC: %v", err) + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane after disabling workspace RBAC: %v", err) + } + + manualRole = &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: manualRoleName, Namespace: workspaceNamespace}, manualRole); err != nil { + t.Fatalf("expected unmanaged role with matching labels to be preserved, got: %v", err) + } + manualRoleBinding = &rbacv1.RoleBinding{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: manualRoleBindingName, Namespace: workspaceNamespace}, manualRoleBinding); err != nil { + t.Fatalf("expected unmanaged role binding with matching labels to be preserved, got: %v", err) + } + }) + + t.Run("ExtraRulesAppended", func(t *testing.T) { + extraRule := rbacv1.PolicyRule{ + APIGroups: []string{""}, + Resources: 
[]string{"configmaps"}, + Verbs: []string{"get", "list"}, + } + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-workspace-rbac-extra-rules", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-workspace-rbac:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "test-workspace-rbac-extra-rules-sa", + }, + RBAC: coderv1alpha1.RBACSpec{ + WorkspacePerms: ptrTo(true), + EnableDeployments: ptrTo(true), + ExtraRules: []rbacv1.PolicyRule{extraRule}, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + roleName := expectedWorkspaceRoleName(t, cp, cp.Spec.ServiceAccount.Name) + role := &rbacv1.Role{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: roleName, Namespace: cp.Namespace}, role); err != nil { + t.Fatalf("get workspace role: %v", err) + } + if !roleContainsRuleForResource(role.Rules, "", "configmaps") { + t.Fatal("expected workspace role to include extra configmaps rule") + } + }) +} + +func TestReconcile_DeploymentAlignment(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + t.Run("PortAndHAEnvAndDefaultAccessURL", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-default", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + ServiceAccount: coderv1alpha1.ServiceAccountSpec{ + Name: "test-deployment-alignment-sa", + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: 
%v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + if len(deployment.Spec.Template.Spec.Containers) != 1 { + t.Fatalf("expected one deployment container, got %d", len(deployment.Spec.Template.Spec.Containers)) + } + container := deployment.Spec.Template.Spec.Containers[0] + if len(container.Args) == 0 || container.Args[0] != "--http-address=0.0.0.0:8080" { + t.Fatalf("expected deployment arg --http-address=0.0.0.0:8080, got %v", container.Args) + } + if !containerHasPort(container, "http", 8080) { + t.Fatalf("expected deployment container to expose http port 8080, got %+v", container.Ports) + } + + kubePodIPEnv := mustFindEnvVar(t, container.Env, "KUBE_POD_IP") + if kubePodIPEnv.ValueFrom == nil || kubePodIPEnv.ValueFrom.FieldRef == nil || kubePodIPEnv.ValueFrom.FieldRef.FieldPath != "status.podIP" { + t.Fatalf("expected KUBE_POD_IP fieldRef status.podIP, got %#v", kubePodIPEnv.ValueFrom) + } + if got := mustFindEnvVar(t, container.Env, "CODER_DERP_SERVER_RELAY_URL").Value; got != "http://$(KUBE_POD_IP):8080" { + t.Fatalf("expected CODER_DERP_SERVER_RELAY_URL %q, got %q", "http://$(KUBE_POD_IP):8080", got) + } + expectedAccessURL := "http://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local" + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != expectedAccessURL { + t.Fatalf("expected default CODER_ACCESS_URL %q, got %q", expectedAccessURL, got) + } + + if deployment.Spec.Template.Spec.ServiceAccountName != cp.Spec.ServiceAccount.Name { + t.Fatalf("expected pod serviceAccountName %q, got %q", cp.Spec.ServiceAccount.Name, deployment.Spec.Template.Spec.ServiceAccountName) + } + }) + + t.Run("DefaultAccessURLIncludesCustomServicePort", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-custom-service-port", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + Service: coderv1alpha1.ServiceSpec{ + Port: 8080, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + expectedAccessURL := "http://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local:8080" + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != expectedAccessURL { + t.Fatalf("expected default CODER_ACCESS_URL %q, got %q", expectedAccessURL, got) + } + }) + + t.Run("UserDefinedAccessURLTakesPrecedence", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-custom-access-url", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + ExtraEnv: []corev1.EnvVar{{ + Name: "CODER_ACCESS_URL", + Value: "https://coder.example.com", + }}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if countEnvVar(container.Env, "CODER_ACCESS_URL") != 1 { + t.Fatalf("expected exactly one CODER_ACCESS_URL env var, got %d", countEnvVar(container.Env, "CODER_ACCESS_URL")) + } + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != "https://coder.example.com" { + t.Fatalf("expected user-defined CODER_ACCESS_URL to win, got %q", got) + } + }) + + t.Run("EnvFromAccessURLTakesPrecedence", func(t *testing.T) { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-access-url", Namespace: "default"}, + Data: map[string]string{ + "CODER_ACCESS_URL": 
"https://envfrom.example.test", + }, + } + if err := k8sClient.Create(ctx, configMap); err != nil { + t.Fatalf("create configmap: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, configMap) + }) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-access-url", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMap.Name}, + }, + }}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if countEnvVar(container.Env, "CODER_ACCESS_URL") != 0 { + t.Fatalf("expected operator to skip default CODER_ACCESS_URL when envFrom is configured, got %v", container.Env) + } + }) + + t.Run("EnvFromWithoutAccessURLKeepsDefault", func(t *testing.T) { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-unrelated", Namespace: "default"}, + Data: map[string]string{ + "UNRELATED_ENV": "value", + }, + } + if err := k8sClient.Create(ctx, configMap); err != nil { + t.Fatalf("create configmap: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, configMap) + }) + + cp := &coderv1alpha1.CoderControlPlane{ + 
ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-unrelated", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMap.Name}, + }, + }}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, APIReader: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if countEnvVar(container.Env, "CODER_ACCESS_URL") != 1 { + t.Fatalf("expected exactly one default CODER_ACCESS_URL env var, got %d", countEnvVar(container.Env, "CODER_ACCESS_URL")) + } + expectedAccessURL := "http://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local" + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != expectedAccessURL { + t.Fatalf("expected default CODER_ACCESS_URL %q, got %q", expectedAccessURL, got) + } + }) + + t.Run("OptionalMissingEnvFromKeepsDefault", func(t *testing.T) { + optional := true + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-optional-missing", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "missing-optional-envfrom"}, + Optional: &optional, + }, + }}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, APIReader: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + expectedAccessURL := "http://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local" + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != expectedAccessURL { + t.Fatalf("expected default CODER_ACCESS_URL %q, got %q", expectedAccessURL, got) + } + }) + + t.Run("EnvFromPrefixEqualToAccessURLKeepsDefault", func(t *testing.T) { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-prefix-equal", Namespace: "default"}, + Data: map[string]string{ + "UNRELATED_ENV": "value", + }, + } + if err := k8sClient.Create(ctx, configMap); err != nil { + t.Fatalf("create configmap: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, configMap) + }) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-prefix-equal", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + EnvFrom: []corev1.EnvFromSource{{ + Prefix: "CODER_ACCESS_URL", + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMap.Name}, + }, + }}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, APIReader: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + expectedAccessURL := "http://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local" + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != expectedAccessURL { + t.Fatalf("expected default CODER_ACCESS_URL %q, got %q", expectedAccessURL, got) + } + }) + + t.Run("EnvFromBinaryDataDoesNotSuppressDefault", func(t *testing.T) { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-binary", Namespace: "default"}, + BinaryData: map[string][]byte{ + "CODER_ACCESS_URL": []byte("https://binary-data.example.test"), + }, + } + if err := k8sClient.Create(ctx, configMap); err != nil { + t.Fatalf("create configmap: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, configMap) + }) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-binary", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMap.Name}, + }, + }}, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, APIReader: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + expectedAccessURL := "http://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local" + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != expectedAccessURL { + t.Fatalf("expected default CODER_ACCESS_URL %q, got %q", expectedAccessURL, got) + } + }) + + t.Run("EnvFromWithConfigMapAndSecretRefsIsRejectedAtAdmission", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-envfrom-both-refs", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "both-refs-configmap"}, + }, + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "both-refs-secret"}, + }, + }}, + }, + } + err := k8sClient.Create(ctx, cp) + if err == nil { + t.Fatal("expected create to fail when envFrom entry sets both configMapRef and secretRef") + } + if !apierrors.IsInvalid(err) { + t.Fatalf("expected invalid error for envFrom entry with both refs, got: %v", err) + } + if !strings.Contains(err.Error(), "envFrom") { + t.Fatalf("expected envFrom validation error, got: %v", err) + } + }) + + t.Run("ResourcesAndSecurityContextsApplied", func(t *testing.T) { + runAsUser := int64(1001) + allowPrivilegeEscalation := false + fsGroup := int64(2001) + resources := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resourceMustParse(t, "250m"), + corev1.ResourceMemory: resourceMustParse(t, "128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resourceMustParse(t, "500m"), + corev1.ResourceMemory: resourceMustParse(t, "256Mi"), + }, + } + securityContext := &corev1.SecurityContext{ + RunAsUser: &runAsUser, + AllowPrivilegeEscalation: &allowPrivilegeEscalation, + } + podSecurityContext := &corev1.PodSecurityContext{FSGroup: &fsGroup} + + cp := &coderv1alpha1.CoderControlPlane{ + 
ObjectMeta: metav1.ObjectMeta{Name: "test-deployment-alignment-security", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-deployment-alignment:latest", + Resources: resources, + SecurityContext: securityContext, + PodSecurityContext: podSecurityContext, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if !reflect.DeepEqual(container.Resources, *resources) { + t.Fatalf("expected container resources %#v, got %#v", *resources, container.Resources) + } + if !reflect.DeepEqual(container.SecurityContext, securityContext) { + t.Fatalf("expected container security context %#v, got %#v", securityContext, container.SecurityContext) + } + if !reflect.DeepEqual(deployment.Spec.Template.Spec.SecurityContext, podSecurityContext) { + t.Fatalf("expected pod security context %#v, got %#v", podSecurityContext, deployment.Spec.Template.Spec.SecurityContext) + } + }) +} + +func TestReconcile_ProbeConfiguration(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + t.Run("ReadinessEnabledByDefault", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-probe-defaults", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-probes:latest", + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + 
t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if container.ReadinessProbe == nil { + t.Fatal("expected readiness probe to be configured by default") + } + if container.ReadinessProbe.HTTPGet == nil { + t.Fatal("expected readiness probe to use HTTP GET") + } + if container.ReadinessProbe.HTTPGet.Path != "/healthz" { + t.Fatalf("expected readiness probe path %q, got %q", "/healthz", container.ReadinessProbe.HTTPGet.Path) + } + if container.ReadinessProbe.HTTPGet.Port != intstr.FromString("http") { + t.Fatalf("expected readiness probe port name %q, got %#v", "http", container.ReadinessProbe.HTTPGet.Port) + } + if container.ReadinessProbe.PeriodSeconds != 10 { + t.Fatalf("expected readiness probe default periodSeconds=10, got %d", container.ReadinessProbe.PeriodSeconds) + } + if container.ReadinessProbe.TimeoutSeconds != 1 { + t.Fatalf("expected readiness probe default timeoutSeconds=1, got %d", container.ReadinessProbe.TimeoutSeconds) + } + if container.ReadinessProbe.SuccessThreshold != 1 { + t.Fatalf("expected readiness probe default successThreshold=1, got %d", container.ReadinessProbe.SuccessThreshold) + } + if container.ReadinessProbe.FailureThreshold != 3 { + t.Fatalf("expected readiness probe default failureThreshold=3, got %d", container.ReadinessProbe.FailureThreshold) + } + }) + + t.Run("LivenessProbeDisabled", func(t *testing.T) { + cp := 
createCoderControlPlaneUnstructured(ctx, t, "test-probe-liveness-disabled", "default", map[string]any{ + "image": "test-probes:latest", + "livenessProbe": map[string]any{ + "enabled": false, + }, + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if container.LivenessProbe != nil { + t.Fatalf("expected liveness probe to be disabled, got %#v", container.LivenessProbe) + } + }) + + t.Run("LivenessProbeEmptyObjectRemainsDisabled", func(t *testing.T) { + cp := createCoderControlPlaneUnstructured(ctx, t, "test-probe-liveness-empty-object", "default", map[string]any{ + "image": "test-probes:latest", + "livenessProbe": map[string]any{}, + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if container.LivenessProbe != nil { + t.Fatalf("expected liveness probe to stay disabled for empty object, got %#v", container.LivenessProbe) + } + }) + + t.Run("BothProbesEnabledWithCustomTiming", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-probe-custom", Namespace: 
"default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-probes:latest", + ReadinessProbe: coderv1alpha1.ProbeSpec{ + Enabled: ptrTo(true), + InitialDelaySeconds: 3, + PeriodSeconds: ptrTo(int32(7)), + TimeoutSeconds: ptrTo(int32(2)), + SuccessThreshold: ptrTo(int32(2)), + FailureThreshold: ptrTo(int32(5)), + }, + LivenessProbe: coderv1alpha1.ProbeSpec{ + Enabled: ptrTo(true), + InitialDelaySeconds: 11, + PeriodSeconds: ptrTo(int32(13)), + TimeoutSeconds: ptrTo(int32(4)), + SuccessThreshold: ptrTo(int32(1)), + FailureThreshold: ptrTo(int32(6)), + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if container.ReadinessProbe == nil || container.LivenessProbe == nil { + t.Fatalf("expected both probes to be configured, got readiness=%#v liveness=%#v", container.ReadinessProbe, container.LivenessProbe) + } + if container.ReadinessProbe.InitialDelaySeconds != 3 || container.ReadinessProbe.PeriodSeconds != 7 || container.ReadinessProbe.TimeoutSeconds != 2 || container.ReadinessProbe.SuccessThreshold != 2 || container.ReadinessProbe.FailureThreshold != 5 { + t.Fatalf("unexpected readiness probe settings: %#v", container.ReadinessProbe) + } + if container.LivenessProbe.InitialDelaySeconds != 11 || container.LivenessProbe.PeriodSeconds != 13 || container.LivenessProbe.TimeoutSeconds != 4 || 
container.LivenessProbe.SuccessThreshold != 1 || container.LivenessProbe.FailureThreshold != 6 { + t.Fatalf("unexpected liveness probe settings: %#v", container.LivenessProbe) + } + }) +} + +func TestReconcile_TLSAlignment(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-tls-alignment", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-tls:latest", + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"my-tls"}, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + if got := mustFindEnvVar(t, container.Env, "CODER_TLS_ENABLE").Value; got != "true" { + t.Fatalf("expected CODER_TLS_ENABLE=true, got %q", got) + } + if got := mustFindEnvVar(t, container.Env, "CODER_TLS_ADDRESS").Value; got != "0.0.0.0:8443" { + t.Fatalf("expected CODER_TLS_ADDRESS=0.0.0.0:8443, got %q", got) + } + if got := mustFindEnvVar(t, container.Env, "CODER_TLS_CERT_FILE").Value; got != "/etc/ssl/certs/coder/my-tls/tls.crt" { + t.Fatalf("expected CODER_TLS_CERT_FILE for my-tls secret, got %q", got) + } + if got := mustFindEnvVar(t, container.Env, "CODER_TLS_KEY_FILE").Value; got != "/etc/ssl/certs/coder/my-tls/tls.key" { + t.Fatalf("expected CODER_TLS_KEY_FILE for my-tls secret, got %q", got) + } + if 
!containerHasPort(container, "https", 8443) { + t.Fatalf("expected deployment container to expose https port 8443, got %+v", container.Ports) + } + if !podHasSecretVolume(deployment.Spec.Template.Spec, "tls-my-tls", "my-tls") { + t.Fatalf("expected pod volume tls-my-tls to mount secret my-tls, got %+v", deployment.Spec.Template.Spec.Volumes) + } + if !containerHasVolumeMount(container, "tls-my-tls", "/etc/ssl/certs/coder/my-tls") { + t.Fatalf("expected container volume mount tls-my-tls at /etc/ssl/certs/coder/my-tls, got %+v", container.VolumeMounts) + } + if got := mustFindEnvVar(t, container.Env, "CODER_ACCESS_URL").Value; got != "https://test-tls-alignment.default.svc.cluster.local" { + t.Fatalf("expected default CODER_ACCESS_URL to use https, got %q", got) + } + + service := &corev1.Service{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, service); err != nil { + t.Fatalf("get service: %v", err) + } + if !serviceHasPort(service.Spec.Ports, "https", 443) { + t.Fatalf("expected service https port 443, got %+v", service.Spec.Ports) + } + + reconciled := &coderv1alpha1.CoderControlPlane{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, reconciled); err != nil { + t.Fatalf("get reconciled control plane: %v", err) + } + expectedStatusURL := "https://" + cp.Name + "." 
+ cp.Namespace + ".svc.cluster.local:443" + if reconciled.Status.URL != expectedStatusURL { + t.Fatalf("expected status URL %q when TLS is enabled, got %q", expectedStatusURL, reconciled.Status.URL) + } +} + +func TestReconcile_TLSDeduplicatesSecretNames(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-tls-secret-dedup", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-tls-dedup:latest", + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"my-tls-dedup", "my-tls-dedup"}, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + + podSpec := deployment.Spec.Template.Spec + tlsVolumeName := secretVolumeName(podSpec, "my-tls-dedup") + if tlsVolumeName == "" { + t.Fatalf("expected TLS volume for secret my-tls-dedup, got %+v", podSpec.Volumes) + } + volumeCount := 0 + for _, volume := range podSpec.Volumes { + if volume.Name == tlsVolumeName { + volumeCount++ + } + } + if volumeCount != 1 { + t.Fatalf("expected exactly one TLS volume named %q, got %+v", tlsVolumeName, podSpec.Volumes) + } + + container := podSpec.Containers[0] + mountCount := 0 + for _, mount := range container.VolumeMounts { + if mount.Name == tlsVolumeName { + mountCount++ + } + } + if mountCount != 1 { + t.Fatalf("expected exactly one TLS volume mount named %q, got 
%+v", tlsVolumeName, container.VolumeMounts) + } + + if got := mustFindEnvVar(t, container.Env, "CODER_TLS_CERT_FILE").Value; got != "/etc/ssl/certs/coder/my-tls-dedup/tls.crt" { + t.Fatalf("expected CODER_TLS_CERT_FILE for my-tls-dedup secret, got %q", got) + } + if got := mustFindEnvVar(t, container.Env, "CODER_TLS_KEY_FILE").Value; got != "/etc/ssl/certs/coder/my-tls-dedup/tls.key" { + t.Fatalf("expected CODER_TLS_KEY_FILE for my-tls-dedup secret, got %q", got) + } +} + +func TestReconcile_TLSWithServicePort443(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-tls-service-port-443", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-tls-443:latest", + Service: coderv1alpha1.ServiceSpec{ + Port: 443, + }, + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"my-tls-443"}, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + service := &corev1.Service{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, service); err != nil { + t.Fatalf("get service: %v", err) + } + if len(service.Spec.Ports) != 2 { + t.Fatalf("expected two service ports when service.port=443 and TLS is enabled, got %+v", service.Spec.Ports) + } + if !serviceHasPort(service.Spec.Ports, "https", 443) { + t.Fatalf("expected TLS-enabled service to expose https port 443, got %+v", service.Spec.Ports) + } + if !serviceHasPort(service.Spec.Ports, "http", 80) { + t.Fatalf("expected TLS-enabled service on port 443 to also 
expose http port 80, got %+v", service.Spec.Ports) + } + for _, port := range service.Spec.Ports { + if port.Name == "https" && port.TargetPort != intstr.FromInt(8443) { + t.Fatalf("expected https service port target 8443, got %+v", port.TargetPort) + } + if port.Name == "http" && port.TargetPort != intstr.FromInt(8080) { + t.Fatalf("expected http service port target 8080, got %+v", port.TargetPort) + } + } +} + +func TestReconcile_TLSAndCertSecretVolumeNameSanitization(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-tls-cert-volume-sanitization", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-tls-sanitization:latest", + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"my.tls.secret"}, + }, + Certs: coderv1alpha1.CertsSpec{ + Secrets: []coderv1alpha1.CertSecretSelector{{ + Name: "extra.ca.secret", + Key: "ca.crt", + }}, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + podSpec := deployment.Spec.Template.Spec + container := podSpec.Containers[0] + + tlsVolumeName := secretVolumeName(podSpec, "my.tls.secret") + if tlsVolumeName == "" { + t.Fatalf("expected TLS volume for dotted secret, got %+v", podSpec.Volumes) + } + if !strings.HasPrefix(tlsVolumeName, "tls-my-tls-secret") { + t.Fatalf("expected TLS volume name to start 
with %q, got %q", "tls-my-tls-secret", tlsVolumeName) + } + if !containerHasVolumeMount(container, tlsVolumeName, "/etc/ssl/certs/coder/my.tls.secret") { + t.Fatalf("expected TLS volume mount name %q for dotted secret, got %+v", tlsVolumeName, container.VolumeMounts) + } + + certVolumeName := secretVolumeName(podSpec, "extra.ca.secret") + if certVolumeName == "" { + t.Fatalf("expected cert volume for dotted secret, got %+v", podSpec.Volumes) + } + if !strings.HasPrefix(certVolumeName, "ca-cert-extra-ca-secret") { + t.Fatalf("expected cert volume name to start with %q, got %q", "ca-cert-extra-ca-secret", certVolumeName) + } + if !containerHasVolumeMount(container, certVolumeName, "/etc/ssl/certs/extra.ca.secret.crt") { + t.Fatalf("expected cert volume mount name %q for dotted secret, got %+v", certVolumeName, container.VolumeMounts) + } +} + +func TestReconcile_CertSecretsWithSharedSecretNameReuseVolume(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cert-secret-shared-volume", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-certs-shared-volume:latest", + Certs: coderv1alpha1.CertsSpec{ + Secrets: []coderv1alpha1.CertSecretSelector{ + {Name: "shared.ca.secret", Key: "ca.crt"}, + {Name: "shared.ca.secret", Key: "alt.crt"}, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != 
nil { + t.Fatalf("get deployment: %v", err) + } + podSpec := deployment.Spec.Template.Spec + container := podSpec.Containers[0] + + certVolumeName := "" + certVolumeCount := 0 + for _, volume := range podSpec.Volumes { + if volume.Secret == nil || volume.Secret.SecretName != "shared.ca.secret" { + continue + } + certVolumeCount++ + certVolumeName = volume.Name + } + if certVolumeCount != 1 { + t.Fatalf("expected exactly one cert volume for shared secret, got %d volumes: %+v", certVolumeCount, podSpec.Volumes) + } + + expectedMountBySubPath := map[string]string{ + "ca.crt": "/etc/ssl/certs/shared.ca.secret-ca.crt", + "alt.crt": "/etc/ssl/certs/shared.ca.secret-alt.crt", + } + mountCount := 0 + for _, mount := range container.VolumeMounts { + if mount.Name != certVolumeName { + continue + } + expectedPath, ok := expectedMountBySubPath[mount.SubPath] + if !ok { + t.Fatalf("unexpected cert subPath %q for mount %#v", mount.SubPath, mount) + } + if mount.MountPath != expectedPath { + t.Fatalf("expected mount path %q for subPath %q, got %q", expectedPath, mount.SubPath, mount.MountPath) + } + mountCount++ + } + if mountCount != len(expectedMountBySubPath) { + t.Fatalf("expected %d cert volume mounts for shared secret, got %d mounts: %+v", len(expectedMountBySubPath), mountCount, container.VolumeMounts) + } +} + +func TestReconcile_CertSecretMountFileNormalizationAvoidsPathCollisions(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cert-normalized-mount-collision", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-certs-mount-collision:latest", + Certs: coderv1alpha1.CertsSpec{ + Secrets: []coderv1alpha1.CertSecretSelector{ + {Name: "foo", Key: "ca.crt"}, + {Name: "foo.crt", Key: "ca.crt"}, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + 
t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + container := deployment.Spec.Template.Spec.Containers[0] + + mountPaths := map[string]struct{}{} + for _, mount := range container.VolumeMounts { + if !strings.HasPrefix(mount.MountPath, "/etc/ssl/certs/foo") { + continue + } + mountPaths[mount.MountPath] = struct{}{} + } + if len(mountPaths) != 2 { + t.Fatalf("expected two unique normalized cert mount paths, got %d: %#v", len(mountPaths), mountPaths) + } + if _, ok := mountPaths["/etc/ssl/certs/foo.crt"]; !ok { + t.Fatalf("expected normalized mount path %q, got %#v", "/etc/ssl/certs/foo.crt", mountPaths) + } + if _, ok := mountPaths["/etc/ssl/certs/foo-2.crt"]; !ok { + t.Fatalf("expected deduplicated mount path %q, got %#v", "/etc/ssl/certs/foo-2.crt", mountPaths) + } +} + +func TestReconcile_PassThroughConfiguration(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + affinity := &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: "kubernetes.io/os", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"linux"}, + }}, + }, + }, + }, + }, + } + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pass-through", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-pass-through:latest", + Volumes: 
[]corev1.Volume{{ + Name: "extra-volume", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "extra-volume", + MountPath: "/var/lib/coder-extra", + }}, + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "coder-extra-env"}, + }, + }}, + NodeSelector: map[string]string{"topology.kubernetes.io/region": "us-west"}, + Tolerations: []corev1.Toleration{{ + Key: "dedicated", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }}, + Affinity: affinity, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, deployment); err != nil { + t.Fatalf("get deployment: %v", err) + } + podSpec := deployment.Spec.Template.Spec + container := podSpec.Containers[0] + + if !reflect.DeepEqual(container.EnvFrom, cp.Spec.EnvFrom) { + t.Fatalf("expected container EnvFrom %#v, got %#v", cp.Spec.EnvFrom, container.EnvFrom) + } + if !containerHasVolumeMount(container, "extra-volume", "/var/lib/coder-extra") { + t.Fatalf("expected container to include extra volume mount, got %+v", container.VolumeMounts) + } + if !podHasVolume(podSpec, "extra-volume") { + t.Fatalf("expected pod to include extra volume, got %+v", podSpec.Volumes) + } + if !reflect.DeepEqual(podSpec.NodeSelector, cp.Spec.NodeSelector) { + t.Fatalf("expected pod node selector %#v, got %#v", cp.Spec.NodeSelector, 
podSpec.NodeSelector) + } + if !reflect.DeepEqual(podSpec.Tolerations, cp.Spec.Tolerations) { + t.Fatalf("expected pod tolerations %#v, got %#v", cp.Spec.Tolerations, podSpec.Tolerations) + } + if !reflect.DeepEqual(podSpec.Affinity, cp.Spec.Affinity) { + t.Fatalf("expected pod affinity %#v, got %#v", cp.Spec.Affinity, podSpec.Affinity) + } +} + +func TestReconcile_IngressExposure(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + + t.Run("IngressCreated", func(t *testing.T) { + className := "nginx" + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ingress-created", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-ingress:latest", + Expose: &coderv1alpha1.ExposeSpec{ + Ingress: &coderv1alpha1.IngressExposeSpec{ + Host: "coder.example.test", + ClassName: &className, + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/proxy-read-timeout": "300", + }, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + ingress := &networkingv1.Ingress{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, ingress); err != nil { + t.Fatalf("get ingress: %v", err) + } + if ingress.Spec.IngressClassName == nil || *ingress.Spec.IngressClassName != className { + t.Fatalf("expected ingress className %q, got %#v", className, ingress.Spec.IngressClassName) + } + if ingress.Annotations["nginx.ingress.kubernetes.io/proxy-read-timeout"] != "300" { + t.Fatalf("expected ingress annotation to be preserved, got %q", 
ingress.Annotations["nginx.ingress.kubernetes.io/proxy-read-timeout"]) + } + if len(ingress.Spec.Rules) != 1 { + t.Fatalf("expected one ingress rule, got %d", len(ingress.Spec.Rules)) + } + rule := ingress.Spec.Rules[0] + if rule.Host != "coder.example.test" { + t.Fatalf("expected ingress host %q, got %q", "coder.example.test", rule.Host) + } + if rule.HTTP == nil || len(rule.HTTP.Paths) != 1 { + t.Fatalf("expected one ingress HTTP path, got %#v", rule.HTTP) + } + path := rule.HTTP.Paths[0] + if path.Path != "/" { + t.Fatalf("expected ingress path %q, got %q", "/", path.Path) + } + if path.Backend.Service == nil { + t.Fatal("expected ingress backend service to be configured") + } + if path.Backend.Service.Name != cp.Name { + t.Fatalf("expected ingress backend service name %q, got %q", cp.Name, path.Backend.Service.Name) + } + if path.Backend.Service.Port.Number != 80 { + t.Fatalf("expected ingress backend service port 80, got %d", path.Backend.Service.Port.Number) + } + }) + + t.Run("IngressTLSServicePort443UsesHTTPBackend", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ingress-tls-service-port-443-backend", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-ingress:latest", + Service: coderv1alpha1.ServiceSpec{Port: 443}, + TLS: coderv1alpha1.TLSSpec{SecretNames: []string{"ingress-backend-tls"}}, + Expose: &coderv1alpha1.ExposeSpec{ + Ingress: &coderv1alpha1.IngressExposeSpec{ + Host: "tls-443.ingress.example.test", + WildcardHost: "*.tls-443.ingress.example.test", + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + 
t.Fatalf("reconcile control plane: %v", err) + } + + ingress := &networkingv1.Ingress{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, ingress); err != nil { + t.Fatalf("get ingress: %v", err) + } + for _, rule := range ingress.Spec.Rules { + if rule.HTTP == nil || len(rule.HTTP.Paths) != 1 { + t.Fatalf("expected one ingress HTTP path for host %q, got %#v", rule.Host, rule.HTTP) + } + path := rule.HTTP.Paths[0] + if path.Backend.Service == nil { + t.Fatalf("expected ingress backend service for host %q", rule.Host) + } + if got := path.Backend.Service.Port.Number; got != 80 { + t.Fatalf("expected ingress backend service port 80 for host %q, got %d", rule.Host, got) + } + } + }) + + t.Run("IngressTLSAndWildcardHost", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ingress-tls-wildcard", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-ingress:latest", + Expose: &coderv1alpha1.ExposeSpec{ + Ingress: &coderv1alpha1.IngressExposeSpec{ + Host: "coder.example.test", + WildcardHost: "*.apps.example.test", + TLS: &coderv1alpha1.IngressTLSExposeSpec{ + SecretName: "coder-tls", + WildcardSecretName: "coder-wildcard-tls", + }, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + ingress := &networkingv1.Ingress{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace}, ingress); err != nil { + t.Fatalf("get ingress: %v", err) + } + if len(ingress.Spec.Rules) != 2 { + t.Fatalf("expected two ingress rules for 
primary and wildcard hosts, got %d", len(ingress.Spec.Rules)) + } + if !ingressHasHost(ingress.Spec.Rules, "coder.example.test") { + t.Fatal("expected ingress rules to include primary host") + } + if !ingressHasHost(ingress.Spec.Rules, "*.apps.example.test") { + t.Fatal("expected ingress rules to include wildcard host") + } + if len(ingress.Spec.TLS) != 2 { + t.Fatalf("expected two ingress TLS entries, got %d", len(ingress.Spec.TLS)) + } + if !ingressTLSContainsSecretAndHost(ingress.Spec.TLS, "coder-tls", "coder.example.test") { + t.Fatal("expected ingress TLS to include primary host secret") + } + if !ingressTLSContainsSecretAndHost(ingress.Spec.TLS, "coder-wildcard-tls", "*.apps.example.test") { + t.Fatal("expected ingress TLS to include wildcard host secret") + } + }) + + t.Run("IngressCleanupOnRemoval", func(t *testing.T) { + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ingress-cleanup", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-ingress:latest", + Expose: &coderv1alpha1.ExposeSpec{ + Ingress: &coderv1alpha1.IngressExposeSpec{ + Host: "cleanup.example.test", + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("first reconcile control plane: %v", err) + } + ingress := &networkingv1.Ingress{} + if err := k8sClient.Get(ctx, namespacedName, ingress); err != nil { + t.Fatalf("expected ingress to exist before cleanup, got: %v", err) + } + + latest := &coderv1alpha1.CoderControlPlane{} + if err := k8sClient.Get(ctx, namespacedName, latest); err != nil { + t.Fatalf("get latest control plane: %v", err) + } 
+ latest.Spec.Expose = nil + if err := k8sClient.Update(ctx, latest); err != nil { + t.Fatalf("update control plane to remove exposure: %v", err) + } + + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("second reconcile control plane: %v", err) + } + err := k8sClient.Get(ctx, namespacedName, ingress) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected ingress to be deleted after exposure removal, got: %v", err) + } + }) +} + +func TestReconcile_HTTPRouteExposure(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + ensureHTTPRouteCRDInstalled(t) + + gatewayNamespace := "default" + sectionName := "https" + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-httproute-created", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-httproute:latest", + Expose: &coderv1alpha1.ExposeSpec{ + Gateway: &coderv1alpha1.GatewayExposeSpec{ + Host: "coder.gateway.example.test", + WildcardHost: "*.apps.gateway.example.test", + ParentRefs: []coderv1alpha1.GatewayParentRef{ + { + Name: "coder-gateway", + Namespace: &gatewayNamespace, + SectionName: §ionName, + }, + }, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}) + if err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + if result.RequeueAfter <= 0 { + t.Fatalf("expected gateway exposure to request periodic drift requeue, got %+v", result) + } + + httpRoute := &gatewayv1.HTTPRoute{} + if err := k8sClient.Get(ctx, namespacedName, httpRoute); err != nil { + t.Fatalf("get httproute: %v", err) + } + if 
len(httpRoute.Spec.Hostnames) != 2 { + t.Fatalf("expected two hostnames on httproute, got %d", len(httpRoute.Spec.Hostnames)) + } + if !httpRouteHasHostname(httpRoute.Spec.Hostnames, "coder.gateway.example.test") { + t.Fatal("expected httproute to include primary host") + } + if !httpRouteHasHostname(httpRoute.Spec.Hostnames, "*.apps.gateway.example.test") { + t.Fatal("expected httproute to include wildcard host") + } + if len(httpRoute.Spec.ParentRefs) != 1 { + t.Fatalf("expected one parentRef, got %d", len(httpRoute.Spec.ParentRefs)) + } + parentRef := httpRoute.Spec.ParentRefs[0] + if string(parentRef.Name) != "coder-gateway" { + t.Fatalf("expected parentRef name %q, got %q", "coder-gateway", parentRef.Name) + } + if parentRef.Namespace == nil || string(*parentRef.Namespace) != gatewayNamespace { + t.Fatalf("expected parentRef namespace %q, got %#v", gatewayNamespace, parentRef.Namespace) + } + if parentRef.SectionName == nil || string(*parentRef.SectionName) != sectionName { + t.Fatalf("expected parentRef sectionName %q, got %#v", sectionName, parentRef.SectionName) + } + if len(httpRoute.Spec.Rules) != 1 { + t.Fatalf("expected one httproute rule, got %d", len(httpRoute.Spec.Rules)) + } + rule := httpRoute.Spec.Rules[0] + if len(rule.Matches) != 1 || rule.Matches[0].Path == nil || rule.Matches[0].Path.Value == nil || *rule.Matches[0].Path.Value != "/" { + t.Fatalf("expected httproute prefix path match on '/', got %#v", rule.Matches) + } + if len(rule.BackendRefs) != 1 { + t.Fatalf("expected one backendRef, got %d", len(rule.BackendRefs)) + } + backendRef := rule.BackendRefs[0].BackendObjectReference + if string(backendRef.Name) != cp.Name { + t.Fatalf("expected backend service name %q, got %q", cp.Name, backendRef.Name) + } + if backendRef.Kind == nil || string(*backendRef.Kind) != "Service" { + t.Fatalf("expected backend kind Service, got %#v", backendRef.Kind) + } + if backendRef.Group == nil || string(*backendRef.Group) != "" { + t.Fatalf("expected backend 
group to be empty for core Service, got %#v", backendRef.Group) + } + if backendRef.Port == nil || int32(*backendRef.Port) != 80 { + t.Fatalf("expected backend port 80, got %#v", backendRef.Port) + } +} + +func TestReconcile_HTTPRouteExposure_TLSServicePort443UsesHTTPBackend(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + ensureHTTPRouteCRDInstalled(t) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-httproute-tls-443-backend-http", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-httproute:latest", + Service: coderv1alpha1.ServiceSpec{ + Port: 443, + }, + TLS: coderv1alpha1.TLSSpec{ + SecretNames: []string{"test-httproute-tls-secret"}, + }, + Expose: &coderv1alpha1.ExposeSpec{ + Gateway: &coderv1alpha1.GatewayExposeSpec{ + Host: "tls-443.gateway.example.test", + ParentRefs: []coderv1alpha1.GatewayParentRef{{ + Name: "coder-gateway", + }}, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + r := &controller.CoderControlPlaneReconciler{Client: k8sClient, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + if _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}); err != nil { + t.Fatalf("reconcile control plane: %v", err) + } + + service := &corev1.Service{} + if err := k8sClient.Get(ctx, namespacedName, service); err != nil { + t.Fatalf("get service: %v", err) + } + if !serviceHasPort(service.Spec.Ports, "https", 443) { + t.Fatalf("expected TLS-enabled service to expose https port 443, got %#v", service.Spec.Ports) + } + if !serviceHasPort(service.Spec.Ports, "http", 80) { + t.Fatalf("expected TLS-enabled service on port 443 to also expose http port 80 for gateway backends, got %#v", service.Spec.Ports) + } + + httpRoute := &gatewayv1.HTTPRoute{} + if err := 
k8sClient.Get(ctx, namespacedName, httpRoute); err != nil { + t.Fatalf("get httproute: %v", err) + } + if len(httpRoute.Spec.Rules) != 1 || len(httpRoute.Spec.Rules[0].BackendRefs) != 1 { + t.Fatalf("expected one backendRef, got %#v", httpRoute.Spec.Rules) + } + backendRef := httpRoute.Spec.Rules[0].BackendRefs[0].BackendObjectReference + if backendRef.Port == nil || int32(*backendRef.Port) != 80 { + t.Fatalf("expected HTTPRoute backend port 80 when service.port=443 with TLS enabled, got %#v", backendRef.Port) + } +} + +func TestReconcile_HTTPRouteExposure_RequiresParentRefs(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + ensureHTTPRouteCRDInstalled(t) + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-httproute-parentrefs-required", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-httproute:latest", + Expose: &coderv1alpha1.ExposeSpec{ + Gateway: &coderv1alpha1.GatewayExposeSpec{ + Host: "missing-parentrefs.gateway.example.test", + }, + }, + }, + } + err := k8sClient.Create(ctx, cp) + if err == nil { + t.Fatal("expected create to fail when gateway parentRefs are missing") + } + if !apierrors.IsInvalid(err) { + t.Fatalf("expected invalid error for missing gateway parentRefs, got: %v", err) + } + if !strings.Contains(err.Error(), "parentRefs") { + t.Fatalf("expected missing parentRefs validation error, got: %v", err) + } +} + +func TestReconcile_HTTPRouteExposure_CRDMissingIsGraceful(t *testing.T) { + ensureGatewaySchemeRegistered(t) + ctx := context.Background() + if err := gatewayv1.Install(scheme); err != nil { + t.Fatalf("register gateway API types in scheme: %v", err) + } + + cp := &coderv1alpha1.CoderControlPlane{ + ObjectMeta: metav1.ObjectMeta{Name: "test-httproute-crd-missing", Namespace: "default"}, + Spec: coderv1alpha1.CoderControlPlaneSpec{ + Image: "test-httproute:latest", + Expose: &coderv1alpha1.ExposeSpec{ + Gateway: 
&coderv1alpha1.GatewayExposeSpec{ + Host: "missing-crd.gateway.example.test", + ParentRefs: []coderv1alpha1.GatewayParentRef{{ + Name: "missing-crd-gateway", + }}, + }, + }, + }, + } + if err := k8sClient.Create(ctx, cp); err != nil { + t.Fatalf("create control plane: %v", err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(ctx, cp) + }) + + clientWithNoMatch := &httpRouteNoMatchClient{Client: k8sClient} + r := &controller.CoderControlPlaneReconciler{Client: clientWithNoMatch, Scheme: scheme} + namespacedName := types.NamespacedName{Name: cp.Name, Namespace: cp.Namespace} + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}) + if err != nil { + t.Fatalf("expected reconcile to gracefully ignore missing Gateway CRDs, got error: %v", err) + } + if result.RequeueAfter <= 0 { + t.Fatalf("expected missing Gateway CRDs to request periodic requeue, got %+v", result) + } + if clientWithNoMatch.HTTPRouteGetCalls() == 0 { + t.Fatal("expected gateway exposure reconciliation to attempt HTTPRoute get") + } + + deployment := &appsv1.Deployment{} + if err := k8sClient.Get(ctx, namespacedName, deployment); err != nil { + t.Fatalf("expected deployment reconciliation to continue when gateway CRDs are missing: %v", err) + } +} + +func createCoderControlPlaneUnstructured(ctx context.Context, t *testing.T, name, namespace string, spec map[string]any) *coderv1alpha1.CoderControlPlane { + t.Helper() + + if strings.TrimSpace(name) == "" { + t.Fatal("assertion failed: control plane name must not be empty") + } + if strings.TrimSpace(namespace) == "" { + t.Fatal("assertion failed: control plane namespace must not be empty") + } + + controlPlane := &unstructured.Unstructured{} + controlPlane.SetAPIVersion(coderv1alpha1.GroupVersion.String()) + controlPlane.SetKind("CoderControlPlane") + controlPlane.SetName(name) + controlPlane.SetNamespace(namespace) + controlPlane.Object["spec"] = spec + + if err := k8sClient.Create(ctx, controlPlane); err != nil { + 
t.Fatalf("create unstructured control plane %s/%s: %v", namespace, name, err) + } + t.Cleanup(func() { + _ = k8sClient.Delete(context.Background(), controlPlane) + }) + + typed := &coderv1alpha1.CoderControlPlane{} + namespacedName := types.NamespacedName{Name: name, Namespace: namespace} + if err := k8sClient.Get(ctx, namespacedName, typed); err != nil { + t.Fatalf("get typed control plane %s: %v", namespacedName, err) + } + if typed.Name != name || typed.Namespace != namespace { + t.Fatalf("assertion failed: fetched control plane %s/%s does not match expected %s/%s", typed.Namespace, typed.Name, namespace, name) + } + + return typed +} + +func roleContainsRuleForResource(rules []rbacv1.PolicyRule, apiGroup, resource string) bool { + for _, rule := range rules { + if !sliceContainsString(rule.APIGroups, apiGroup) { + continue + } + if !sliceContainsString(rule.Resources, resource) { + continue + } + return true + } + return false +} + +func mustFindEnvVar(t *testing.T, envVars []corev1.EnvVar, name string) corev1.EnvVar { + t.Helper() + + for _, envVar := range envVars { + if envVar.Name == name { + return envVar + } + } + t.Fatalf("expected environment variable %q to exist, got %v", name, envVars) + return corev1.EnvVar{} +} + +func countEnvVar(envVars []corev1.EnvVar, name string) int { + count := 0 + for _, envVar := range envVars { + if envVar.Name == name { + count++ + } + } + return count +} + +func containerHasPort(container corev1.Container, name string, port int32) bool { + for _, containerPort := range container.Ports { + if containerPort.Name == name && containerPort.ContainerPort == port { + return true + } + } + return false +} + +func podHasSecretVolume(podSpec corev1.PodSpec, volumeName, secretName string) bool { + for _, volume := range podSpec.Volumes { + if volume.Name != volumeName { + continue + } + if volume.Secret != nil && volume.Secret.SecretName == secretName { + return true + } + } + return false +} + +func secretVolumeName(podSpec 
corev1.PodSpec, secretName string) string { + for _, volume := range podSpec.Volumes { + if volume.Secret == nil { + continue + } + if volume.Secret.SecretName == secretName { + return volume.Name + } + } + return "" +} + +func podHasVolume(podSpec corev1.PodSpec, volumeName string) bool { + for _, volume := range podSpec.Volumes { + if volume.Name == volumeName { + return true + } + } + return false +} + +func containerHasVolumeMount(container corev1.Container, mountName, mountPath string) bool { + for _, volumeMount := range container.VolumeMounts { + if volumeMount.Name == mountName && volumeMount.MountPath == mountPath { + return true + } + } + return false +} + +func serviceHasPort(servicePorts []corev1.ServicePort, name string, port int32) bool { + for _, servicePort := range servicePorts { + if servicePort.Name == name && servicePort.Port == port { + return true + } + } + return false +} + +func ingressHasHost(rules []networkingv1.IngressRule, host string) bool { + for _, rule := range rules { + if rule.Host == host { + return true + } + } + return false +} + +func ingressTLSContainsSecretAndHost(entries []networkingv1.IngressTLS, secretName, host string) bool { + for _, entry := range entries { + if entry.SecretName != secretName { + continue + } + if sliceContainsString(entry.Hosts, host) { + return true + } + } + return false +} + +func httpRouteHasHostname(hostnames []gatewayv1.Hostname, hostname string) bool { + for _, item := range hostnames { + if string(item) == hostname { + return true + } + } + return false +} + +func sliceContainsString(values []string, candidate string) bool { + for _, value := range values { + if value == candidate { + return true + } + } + return false +} + +func expectedWorkspaceRoleName(t *testing.T, cp *coderv1alpha1.CoderControlPlane, serviceAccountName string) string { + t.Helper() + + scopeHash := expectedWorkspaceRBACScopeHash(t, cp) + return expectedScopedWorkspaceRBACName(t, serviceAccountName, scopeHash, 
"-workspace-perms") +} + +func expectedWorkspaceRoleBindingName(t *testing.T, cp *coderv1alpha1.CoderControlPlane, serviceAccountName string) string { + t.Helper() + + scopeHash := expectedWorkspaceRBACScopeHash(t, cp) + return expectedScopedWorkspaceRBACName(t, serviceAccountName, scopeHash, "") +} + +func expectedWorkspaceRBACScopeHash(t *testing.T, cp *coderv1alpha1.CoderControlPlane) string { + t.Helper() + if cp == nil { + t.Fatal("expected control plane must not be nil") + } + if strings.TrimSpace(cp.Namespace) == "" { + t.Fatal("expected control plane namespace must not be empty") + } + if strings.TrimSpace(cp.Name) == "" { + t.Fatal("expected control plane name must not be empty") + } + + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(cp.Namespace)) + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte(cp.Name)) + return fmt.Sprintf("%08x", hasher.Sum32()) +} + +func expectedScopedWorkspaceRBACName(t *testing.T, baseName, scopeHash, suffix string) string { + t.Helper() + + normalizedBaseName := strings.TrimSpace(baseName) + if normalizedBaseName == "" { + t.Fatal("expected workspace RBAC base name must not be empty") + } + if strings.TrimSpace(scopeHash) == "" { + t.Fatal("expected workspace RBAC scope hash must not be empty") + } + + const kubernetesObjectNameMaxLength = 253 + + candidate := fmt.Sprintf("%s-%s%s", normalizedBaseName, scopeHash, suffix) + if len(candidate) <= kubernetesObjectNameMaxLength { + return candidate + } + + available := kubernetesObjectNameMaxLength - len(scopeHash) - len(suffix) - 1 + if available < 1 { + t.Fatalf("expected workspace RBAC name prefix capacity to be positive, got %d", available) + } + + truncatedPrefix := normalizedBaseName + if len(truncatedPrefix) > available { + truncatedPrefix = truncatedPrefix[:available] + } + truncatedPrefix = strings.Trim(truncatedPrefix, "-.") + if truncatedPrefix == "" { + truncatedPrefix = "workspace" + } + + result := fmt.Sprintf("%s-%s%s", truncatedPrefix, scopeHash, 
suffix) + if len(result) > kubernetesObjectNameMaxLength { + t.Fatalf("expected workspace RBAC name %q to be <= %d characters", result, kubernetesObjectNameMaxLength) + } + + return result +} + +func resourceMustParse(t *testing.T, quantity string) resource.Quantity { + t.Helper() + + parsedQuantity, err := resource.ParseQuantity(quantity) + if err != nil { + t.Fatalf("parse resource quantity %q: %v", quantity, err) + } + return parsedQuantity +} + +var ( + ensureGatewaySchemeOnce sync.Once + ensureGatewaySchemeErr error +) + +func ensureGatewaySchemeRegistered(t *testing.T) { + t.Helper() + + ensureGatewaySchemeOnce.Do(func() { + if scheme == nil { + ensureGatewaySchemeErr = errors.New("assertion failed: test scheme must not be nil") + return + } + ensureGatewaySchemeErr = gatewayv1.Install(scheme) + }) + if ensureGatewaySchemeErr != nil { + t.Fatalf("register gateway API types in test scheme: %v", ensureGatewaySchemeErr) + } +} + +var ensureHTTPRouteCRDOnce sync.Once + +func ensureHTTPRouteCRDInstalled(t *testing.T) { + t.Helper() + + var installErr error + ensureHTTPRouteCRDOnce.Do(func() { + if err := gatewayv1.Install(scheme); err != nil { + installErr = err + return + } + + apiextensionsClient, err := apiextensionsclientset.NewForConfig(cfg) + if err != nil { + installErr = err + return + } + + const httpRouteCRDName = "httproutes.gateway.networking.k8s.io" + _, err = apiextensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), httpRouteCRDName, metav1.GetOptions{}) + if err == nil { + return + } + if !apierrors.IsNotFound(err) { + installErr = err + return + } + + httpRouteCRD := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: httpRouteCRDName, + Annotations: map[string]string{ + "api-approved.kubernetes.io": "https://github.com/kubernetes-sigs/gateway-api", + }, + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: gatewayv1.GroupVersion.Group, + Names: 
apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "httproutes", + Singular: "httproute", + Kind: "HTTPRoute", + ListKind: "HTTPRouteList", + }, + Scope: apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{ + Name: gatewayv1.GroupVersion.Version, + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "spec": { + Type: "object", + XPreserveUnknownFields: ptrTo(true), + }, + "status": { + Type: "object", + XPreserveUnknownFields: ptrTo(true), + }, + }, + }, + }, + }}, + }, + } + if _, err := apiextensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.Background(), httpRouteCRD, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + installErr = err + return + } + + deadline := time.Now().Add(10 * time.Second) + for time.Now().Before(deadline) { + storedCRD, getErr := apiextensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), httpRouteCRDName, metav1.GetOptions{}) + if getErr != nil { + time.Sleep(100 * time.Millisecond) + continue + } + if customResourceDefinitionEstablished(storedCRD) { + return + } + time.Sleep(100 * time.Millisecond) + } + + installErr = errors.New("timed out waiting for HTTPRoute CRD establishment") + }) + if installErr != nil { + t.Fatalf("install HTTPRoute CRD for test: %v", installErr) + } +} + +func customResourceDefinitionEstablished(customResourceDefinition *apiextensionsv1.CustomResourceDefinition) bool { + if customResourceDefinition == nil { + return false + } + for _, condition := range customResourceDefinition.Status.Conditions { + if condition.Type == apiextensionsv1.Established && condition.Status == apiextensionsv1.ConditionTrue { + return true + } + } + return false +} + +type httpRouteNoMatchClient struct { + ctrlclient.Client + mu sync.Mutex + 
httpRouteGetCalls int +} + +func (c *httpRouteNoMatchClient) Get(ctx context.Context, key types.NamespacedName, object ctrlclient.Object, opts ...ctrlclient.GetOption) error { + if _, ok := object.(*gatewayv1.HTTPRoute); ok { + c.mu.Lock() + c.httpRouteGetCalls++ + c.mu.Unlock() + return &apimeta.NoResourceMatchError{PartialResource: schema.GroupVersionResource{ + Group: gatewayv1.GroupVersion.Group, + Version: gatewayv1.GroupVersion.Version, + Resource: "httproutes", + }} + } + return c.Client.Get(ctx, key, object, opts...) +} + +func (c *httpRouteNoMatchClient) HTTPRouteGetCalls() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.httpRouteGetCalls +} + func ptrTo[T any](value T) *T { return &value } @@ -1969,6 +4797,7 @@ func assertSingleControllerOwnerReference(t *testing.T, ownerReferences []metav1 } func TestReconcile_NilClient(t *testing.T) { + ensureGatewaySchemeRegistered(t) r := &controller.CoderControlPlaneReconciler{ Client: nil, Scheme: scheme, @@ -1991,6 +4820,7 @@ func TestReconcile_NilClient(t *testing.T) { } func TestReconcile_NilScheme(t *testing.T) { + ensureGatewaySchemeRegistered(t) r := &controller.CoderControlPlaneReconciler{ Client: k8sClient, Scheme: nil, diff --git a/vendor/github.com/emicklei/go-restful/v3/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml deleted file mode 100644 index 3a0bf5ff..00000000 --- a/vendor/github.com/emicklei/go-restful/v3/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.x - -before_install: - - go test -v - -script: - - go test -race -coverprofile=coverage.txt -covermode=atomic - -after_success: - - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 6f24dfff..4fcd920a 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,9 @@ # Change history of 
go-restful +## [v3.13.0] - 2025-08-14 + +- optimize performance of path matching in CurlyRouter ( thanks @wenhuang, Wen Huang) + ## [v3.12.2] - 2025-02-21 - allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 3fb40d19..50a79ab6 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -84,6 +84,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Configurable (trace) logging - Customizable gzip/deflate readers and writers using CompressorProvider registration - Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function +- Added `SetPathTokenCacheEnabled` and `SetCustomVerbCacheEnabled` to disable regexp caching (default=true) ## How to customize There are several hooks to customize the behavior of the go-restful package. diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index 6fd2bcd5..eec43bfd 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -9,11 +9,35 @@ import ( "regexp" "sort" "strings" + "sync" ) // CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. type CurlyRouter struct{} +var ( + regexCache sync.Map // Cache for compiled regex patterns + pathTokenCacheEnabled = true // Enable/disable path token regex caching +) + +// SetPathTokenCacheEnabled enables or disables path token regex caching for CurlyRouter. +// When disabled, regex patterns will be compiled on every request. +// When enabled (default), compiled regex patterns are cached for better performance. 
+func SetPathTokenCacheEnabled(enabled bool) { + pathTokenCacheEnabled = enabled +} + +// getCachedRegexp retrieves a compiled regex from the cache if found and valid. +// Returns the regex and true if found and valid, nil and false otherwise. +func getCachedRegexp(cache *sync.Map, pattern string) (*regexp.Regexp, bool) { + if cached, found := cache.Load(pattern); found { + if regex, ok := cached.(*regexp.Regexp); ok { + return regex, true + } + } + return nil, false +} + // SelectRoute is part of the Router interface and returns the best match // for the WebService and its Route for the given Request. func (c CurlyRouter) SelectRoute( @@ -113,8 +137,28 @@ func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, reque } return true, true } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false + + // Check cache first (if enabled) + if pathTokenCacheEnabled { + if regex, found := getCachedRegexp(®exCache, regPart); found { + matched := regex.MatchString(requestToken) + return matched, false + } + } + + // Compile the regex + regex, err := regexp.Compile(regPart) + if err != nil { + return false, false + } + + // Cache the regex (if enabled) + if pathTokenCacheEnabled { + regexCache.Store(regPart, regex) + } + + matched := regex.MatchString(requestToken) + return matched, false } var jsr311Router = RouterJSR311{} @@ -168,7 +212,7 @@ func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens if matchesToken { score++ // extra score for regex match } - } + } } else { // not a parameter if eachRequestToken != eachRouteToken { diff --git a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go index bfc17efd..0b98eeb0 100644 --- a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go +++ b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go @@ -1,14 +1,28 @@ package restful +// Copyright 2025 Ernest Micklei. 
All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + import ( "fmt" "regexp" + "sync" ) var ( - customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") + customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") + customVerbCache sync.Map // Cache for compiled custom verb regexes + customVerbCacheEnabled = true // Enable/disable custom verb regex caching ) +// SetCustomVerbCacheEnabled enables or disables custom verb regex caching. +// When disabled, custom verb regex patterns will be compiled on every request. +// When enabled (default), compiled custom verb regex patterns are cached for better performance. +func SetCustomVerbCacheEnabled(enabled bool) { + customVerbCacheEnabled = enabled +} + func hasCustomVerb(routeToken string) bool { return customVerbReg.MatchString(routeToken) } @@ -20,7 +34,23 @@ func isMatchCustomVerb(routeToken string, pathToken string) bool { } customVerb := rs[1] - specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb)) + regexPattern := fmt.Sprintf(":%s$", customVerb) + + // Check cache first (if enabled) + if customVerbCacheEnabled { + if specificVerbReg, found := getCachedRegexp(&customVerbCache, regexPattern); found { + return specificVerbReg.MatchString(pathToken) + } + } + + // Compile the regex + specificVerbReg := regexp.MustCompile(regexPattern) + + // Cache the regex (if enabled) + if customVerbCacheEnabled { + customVerbCache.Store(regexPattern, specificVerbReg) + } + return specificVerbReg.MatchString(pathToken) } diff --git a/vendor/github.com/emicklei/go-restful/v3/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go index 69b13057..80809225 100644 --- a/vendor/github.com/emicklei/go-restful/v3/doc.go +++ b/vendor/github.com/emicklei/go-restful/v3/doc.go @@ -1,7 +1,7 @@ /* Package restful , a lean package for creating REST-style WebServices without magic. 
-WebServices and Routes +### WebServices and Routes A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. @@ -30,14 +30,14 @@ The (*Request, *Response) arguments provide functions for reading information fr See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. -Regular expression matching Routes +### Regular expression matching Routes A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) This feature requires the use of a CurlyRouter. -Containers +### Containers A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. @@ -47,7 +47,7 @@ You can create your own Container and create a new http.Server for that particul container := restful.NewContainer() server := &http.Server{Addr: ":8081", Handler: container} -Filters +### Filters A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. @@ -60,22 +60,21 @@ Use the following statement to pass the request,response pair to the next filter chain.ProcessFilter(req, resp) -Container Filters +### Container Filters These are processed before any registered WebService. 
// install a (global) filter for the default container (processed before any webservice) restful.Filter(globalLogging) -WebService Filters +### WebService Filters These are processed before any Route of a WebService. // install a webservice filter (processed before any route) ws.Filter(webserviceLogging).Filter(measureTime) - -Route Filters +### Route Filters These are processed before calling the function associated with the Route. @@ -84,7 +83,7 @@ These are processed before calling the function associated with the Route. See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. -Response Encoding +### Response Encoding Two encodings are supported: gzip and deflate. To enable this for all responses: @@ -95,20 +94,20 @@ Alternatively, you can create a Filter that performs the encoding and install it See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go -OPTIONS support +### OPTIONS support By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. Filter(OPTIONSFilter()) -CORS +### CORS By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} Filter(cors.Filter) -Error Handling +### Error Handling Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. @@ -137,11 +136,11 @@ The request does not have or has an unknown Accept Header set for this operation The request does not have or has an unknown Content-Type Header set for this operation. 
-ServiceError +### ServiceError In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. -Performance options +### Performance options This package has several options that affect the performance of your service. It is important to understand them and how you can change it. @@ -156,30 +155,27 @@ Default value is true If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. -Trouble shooting +### Trouble shooting This package has the means to produce detail logging of the complete Http request matching process and filter invocation. Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) -Logging +### Logging The restful.SetLogger() method allows you to override the logger used by the package. By default restful uses the standard library `log` package and logs to stdout. Different logging packages are supported as long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your preferred package is simple. -Resources +### Resources -[project]: https://github.com/emicklei/go-restful +(c) 2012-2025, http://ernestmicklei.com. MIT License +[project]: https://github.com/emicklei/go-restful [examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - +[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ [showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. 
MIT License */ package restful diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index 22f8d21c..50063062 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -1,61 +1,62 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - +version: "2" linters: - enable-all: true + default: all disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert - funlen - - godox + - gochecknoglobals + - gochecknoinits - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - godot - - gofumpt + - godox + - gosmopolitan + - inamedparam + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns - paralleltest - - tparallel + - testpackage - thelper - - ifshort - - exhaustruct + - tparallel + - unparam - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase + - whitespace + - wrapcheck + - wsl + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git 
a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go new file mode 100644 index 00000000..b84343d9 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/errors.go @@ -0,0 +1,18 @@ +package jsonpointer + +type pointerError string + +func (e pointerError) Error() string { + return string(e) +} + +const ( + // ErrPointer is an error raised by the jsonpointer package + ErrPointer pointerError = "JSON pointer error" + + // ErrInvalidStart states that a JSON pointer must start with a separator ("/") + ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator + + // ErrUnsupportedValueType indicates that a value of the wrong type is being set + ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values" +) diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d970c7cf..61362105 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -39,9 +39,6 @@ import ( const ( emptyPointer = `` pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator - notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -80,7 +77,7 @@ func (p *Pointer) parse(jsonPointerString string) error { if jsonPointerString != emptyPointer { if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) + err = errors.Join(ErrInvalidStart, ErrPointer) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) 
@@ -128,7 +125,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() if isNil(node) { - return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer) } switch typed := node.(type) { @@ -146,7 +143,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } fld := rValue.FieldByName(nm) return fld.Interface(), kind, nil @@ -158,7 +155,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide if mv.IsValid() { return mv.Interface(), kind, nil } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) case reflect.Slice: tokenIndex, err := strconv.Atoi(decodedToken) @@ -167,14 +164,14 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide } sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer) } elem := rValue.Index(tokenIndex) return elem.Interface(), kind, nil default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) } } @@ -182,6 +179,11 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide func setSingleImpl(node, data any, decodedToken string, nameProvider 
*swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) + // Check for nil to prevent panic when calling rValue.Type() + if isNil(node) { + return fmt.Errorf("cannot set field %q on nil value: %w", decodedToken, ErrPointer) + } + if ns, ok := node.(JSONSetable); ok { // pointer impl return ns.JSONSet(decodedToken, data) } @@ -194,7 +196,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { - return fmt.Errorf("object has no field %q", decodedToken) + return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } fld := rValue.FieldByName(nm) if fld.IsValid() { @@ -214,18 +216,18 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP } sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) } elem := rValue.Index(tokenIndex) if !elem.CanSet() { - return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer) } elem.Set(reflect.ValueOf(data)) return nil default: - return fmt.Errorf("invalid token reference %q", decodedToken) + return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) } } @@ -244,7 +246,6 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K } for _, token := range p.referenceTokens { - decodedToken := Unescape(token) r, knd, err := getSingleImpl(node, decodedToken, nameProvider) @@ -264,7 +265,10 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != 
reflect.Array { - return errors.New("only structs, pointers, maps and slices are supported for setting values") + return errors.Join( + ErrUnsupportedValueType, + ErrPointer, + ) } if nameProvider == nil { @@ -286,6 +290,11 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { return setSingleImpl(node, data, decodedToken, nameProvider) } + // Check for nil during traversal + if isNil(node) { + return fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer) + } + rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() @@ -307,7 +316,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { - return fmt.Errorf("object has no field %q", decodedToken) + return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } fld := rValue.FieldByName(nm) if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { @@ -321,7 +330,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { mv := rValue.MapIndex(kv) if !mv.IsValid() { - return fmt.Errorf("object has no key %q", decodedToken) + return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) } if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { node = mv.Addr().Interface() @@ -336,7 +345,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { } sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) } elem := rValue.Index(tokenIndex) @@ -347,7 +356,7 @@ func (p *Pointer) set(node, data 
any, nameProvider *swag.NameProvider) error { node = elem.Interface() default: - return fmt.Errorf("invalid token reference %q", decodedToken) + return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) } } @@ -404,10 +413,10 @@ func (p *Pointer) Offset(document string) (int64, error) { return 0, err } default: - return 0, fmt.Errorf("invalid token %#v", tk) + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) } default: - return 0, fmt.Errorf("invalid token %#v", tk) + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) } } return offset, nil @@ -437,16 +446,16 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { return offset, nil } default: - return 0, fmt.Errorf("invalid token %#v", tk) + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) } } - return 0, fmt.Errorf("token reference %q not found", decodedToken) + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) } func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { idx, err := strconv.Atoi(decodedToken) if err != nil { - return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer) } var i int for i = 0; i < idx && dec.More(); i++ { @@ -470,7 +479,7 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { } if !dec.More() { - return 0, fmt.Errorf("token reference %q not found", decodedToken) + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) } return dec.InputOffset(), nil } diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ed55c2b..4de21512 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) 
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.0.2 +GOLANGCI_LINT_VERSION ?= v2.1.5 +GOLANGCI_FMT_OPTS ?= # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -156,9 +157,13 @@ $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format -common-format: +common-format: $(GOLANGCI_LINT) @echo ">> formatting code" $(GO) fmt $(pkgs) +ifdef GOLANGCI_LINT + @echo ">> formatting code with golangci-lint" + $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) +endif .PHONY: common-vet common-vet: @@ -248,8 +253,8 @@ $(PROMU): cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) -.PHONY: proto -proto: +.PHONY: common-proto +common-proto: @echo ">> generating code from proto files" @./scripts/genproto.sh diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 67a9d2b4..1fd4381b 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -123,13 +123,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { finish := float64(0) pct := float64(0) recovering := strings.Contains(lines[syncLineIdx], "recovery") + reshaping := strings.Contains(lines[syncLineIdx], "reshape") resyncing := strings.Contains(lines[syncLineIdx], "resync") checking := strings.Contains(lines[syncLineIdx], "check") // Append recovery and resyncing state 
info. - if recovering || resyncing || checking { + if recovering || resyncing || checking || reshaping { if recovering { state = "recovering" + } else if reshaping { + state = "reshaping" } else if checking { state = "checking" } else { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 4b2c4050..937e1f96 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -66,6 +66,10 @@ type Meminfo struct { // Memory which has been evicted from RAM, and is temporarily // on the disk SwapFree *uint64 + // Memory consumed by the zswap backend (compressed size) + Zswap *uint64 + // Amount of anonymous memory stored in zswap (original size) + Zswapped *uint64 // Memory which is waiting to get written back to the disk Dirty *uint64 // Memory which is actively being written back to the disk @@ -85,6 +89,8 @@ type Meminfo struct { // amount of memory dedicated to the lowest level of page // tables. PageTables *uint64 + // secondary page tables. 
+ SecPageTables *uint64 // NFS pages sent to the server, but not yet committed to // stable storage NFSUnstable *uint64 @@ -129,15 +135,18 @@ type Meminfo struct { Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 + FileHugePages *uint64 ShmemHugePages *uint64 ShmemPmdMapped *uint64 CmaTotal *uint64 CmaFree *uint64 + Unaccepted *uint64 HugePagesTotal *uint64 HugePagesFree *uint64 HugePagesRsvd *uint64 HugePagesSurp *uint64 Hugepagesize *uint64 + Hugetlb *uint64 DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 @@ -161,6 +170,8 @@ type Meminfo struct { MlockedBytes *uint64 SwapTotalBytes *uint64 SwapFreeBytes *uint64 + ZswapBytes *uint64 + ZswappedBytes *uint64 DirtyBytes *uint64 WritebackBytes *uint64 AnonPagesBytes *uint64 @@ -171,6 +182,7 @@ type Meminfo struct { SUnreclaimBytes *uint64 KernelStackBytes *uint64 PageTablesBytes *uint64 + SecPageTablesBytes *uint64 NFSUnstableBytes *uint64 BounceBytes *uint64 WritebackTmpBytes *uint64 @@ -182,11 +194,14 @@ type Meminfo struct { PercpuBytes *uint64 HardwareCorruptedBytes *uint64 AnonHugePagesBytes *uint64 + FileHugePagesBytes *uint64 ShmemHugePagesBytes *uint64 ShmemPmdMappedBytes *uint64 CmaTotalBytes *uint64 CmaFreeBytes *uint64 + UnacceptedBytes *uint64 HugepagesizeBytes *uint64 + HugetlbBytes *uint64 DirectMap4kBytes *uint64 DirectMap2MBytes *uint64 DirectMap1GBytes *uint64 @@ -287,6 +302,12 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "SwapFree:": m.SwapFree = &val m.SwapFreeBytes = &valBytes + case "Zswap:": + m.Zswap = &val + m.ZswapBytes = &valBytes + case "Zswapped:": + m.Zswapped = &val + m.ZswappedBytes = &valBytes case "Dirty:": m.Dirty = &val m.DirtyBytes = &valBytes @@ -317,6 +338,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "PageTables:": m.PageTables = &val m.PageTablesBytes = &valBytes + case "SecPageTables:": + m.SecPageTables = &val + m.SecPageTablesBytes = &valBytes case "NFS_Unstable:": m.NFSUnstable = &val m.NFSUnstableBytes = &valBytes @@ 
-350,6 +374,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "AnonHugePages:": m.AnonHugePages = &val m.AnonHugePagesBytes = &valBytes + case "FileHugePages:": + m.FileHugePages = &val + m.FileHugePagesBytes = &valBytes case "ShmemHugePages:": m.ShmemHugePages = &val m.ShmemHugePagesBytes = &valBytes @@ -362,6 +389,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "CmaFree:": m.CmaFree = &val m.CmaFreeBytes = &valBytes + case "Unaccepted:": + m.Unaccepted = &val + m.UnacceptedBytes = &valBytes case "HugePages_Total:": m.HugePagesTotal = &val case "HugePages_Free:": @@ -373,6 +403,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "Hugepagesize:": m.Hugepagesize = &val m.HugepagesizeBytes = &valBytes + case "Hugetlb:": + m.Hugetlb = &val + m.HugetlbBytes = &valBytes case "DirectMap4k:": m.DirectMap4k = &val m.DirectMap4kBytes = &valBytes diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06a8d931..3328556b 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -101,6 +101,12 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss of the process. RSSLimit uint64 + // The address above which program text can run. + StartCode uint64 + // The address below which program text can run. + EndCode uint64 + // The address of the start (i.e., bottom) of the stack. + StartStack uint64 // CPU number last executed on. 
Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes @@ -177,9 +183,9 @@ func (p Proc) Stat() (ProcStat, error) { &s.VSize, &s.RSS, &s.RSSLimit, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, + &s.StartCode, + &s.EndCode, + &s.StartStack, &ignoreUint64, &ignoreUint64, &ignoreUint64, diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go new file mode 100644 index 00000000..ed579842 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -0,0 +1,116 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// - https://man7.org/linux/man-pages/man5/proc_pid_statm.5.html + +// ProcStatm Provides memory usage information for a process, measured in memory pages. +// Read from /proc/[pid]/statm. +type ProcStatm struct { + // The process ID. 
+ PID int + // total program size (same as VmSize in status) + Size uint64 + // resident set size (same as VmRSS in status) + Resident uint64 + // number of resident shared pages (i.e., backed by a file) + Shared uint64 + // text (code) + Text uint64 + // library (unused since Linux 2.6; always 0) + Lib uint64 + // data + stack + Data uint64 + // dirty pages (unused since Linux 2.6; always 0) + Dt uint64 +} + +// NewStatm returns the current status information of the process. +// Deprecated: Use p.Statm() instead. +func (p Proc) NewStatm() (ProcStatm, error) { + return p.Statm() +} + +// Statm returns the current memory usage information of the process. +func (p Proc) Statm() (ProcStatm, error) { + data, err := util.ReadFileNoStat(p.path("statm")) + if err != nil { + return ProcStatm{}, err + } + + statmSlice, err := parseStatm(data) + if err != nil { + return ProcStatm{}, err + } + + procStatm := ProcStatm{ + PID: p.PID, + Size: statmSlice[0], + Resident: statmSlice[1], + Shared: statmSlice[2], + Text: statmSlice[3], + Lib: statmSlice[4], + Data: statmSlice[5], + Dt: statmSlice[6], + } + + return procStatm, nil +} + +// parseStatm return /proc/[pid]/statm data to uint64 slice. +func parseStatm(data []byte) ([]uint64, error) { + var statmSlice []uint64 + statmItems := strings.Fields(string(data)) + for i := 0; i < len(statmItems); i++ { + statmItem, err := strconv.ParseUint(statmItems[i], 10, 64) + if err != nil { + return nil, err + } + statmSlice = append(statmSlice, statmItem) + } + return statmSlice, nil +} + +// SizeBytes returns the process of total program size in bytes. +func (s ProcStatm) SizeBytes() uint64 { + return s.Size * uint64(os.Getpagesize()) +} + +// ResidentBytes returns the process of resident set size in bytes. +func (s ProcStatm) ResidentBytes() uint64 { + return s.Resident * uint64(os.Getpagesize()) +} + +// SHRBytes returns the process of share memory size in bytes. 
+func (s ProcStatm) SHRBytes() uint64 { + return s.Shared * uint64(os.Getpagesize()) +} + +// TextBytes returns the process of text (code) size in bytes. +func (s ProcStatm) TextBytes() uint64 { + return s.Text * uint64(os.Getpagesize()) +} + +// DataBytes returns the process of data + stack size in bytes. +func (s ProcStatm) DataBytes() uint64 { + return s.Data * uint64(os.Getpagesize()) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ee7c0d26..bb19cdb0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -501,7 +501,7 @@ github.com/elastic/crd-ref-docs/processor github.com/elastic/crd-ref-docs/renderer github.com/elastic/crd-ref-docs/templates github.com/elastic/crd-ref-docs/types -# github.com/emicklei/go-restful/v3 v3.12.2 +# github.com/emicklei/go-restful/v3 v3.13.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -567,7 +567,7 @@ github.com/go-logr/zapr ## explicit; go 1.12 github.com/go-ole/go-ole github.com/go-ole/go-ole/oleutil -# github.com/go-openapi/jsonpointer v0.21.0 +# github.com/go-openapi/jsonpointer v0.21.2 ## explicit; go 1.20 github.com/go-openapi/jsonpointer # github.com/go-openapi/jsonreference v0.21.0 @@ -1302,7 +1302,7 @@ github.com/prometheus/client_model/go ## explicit; go 1.24.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model -# github.com/prometheus/procfs v0.16.1 +# github.com/prometheus/procfs v0.17.0 ## explicit; go 1.23.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -3216,6 +3216,9 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml sigs.k8s.io/controller-tools/pkg/version sigs.k8s.io/controller-tools/pkg/webhook +# sigs.k8s.io/gateway-api v1.4.1 +## explicit; go 1.24.0 +sigs.k8s.io/gateway-api/apis/v1 # sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 ## explicit; go 1.23 sigs.k8s.io/json diff --git a/vendor/sigs.k8s.io/gateway-api/LICENSE 
b/vendor/sigs.k8s.io/gateway-api/LICENSE new file mode 100644 index 00000000..a5949bd7 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 The Kubernetes Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go new file mode 100644 index 00000000..674dca7b --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go @@ -0,0 +1,324 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:resource:categories=gateway-api,shortName=btlspolicy +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +// BackendTLSPolicy is a Direct Attached Policy. +// +kubebuilder:metadata:labels="gateway.networking.k8s.io/policy=Direct" + +// BackendTLSPolicy provides a way to configure how a Gateway +// connects to a Backend via TLS. +type BackendTLSPolicy struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of BackendTLSPolicy. + // +required + Spec BackendTLSPolicySpec `json:"spec"` + + // Status defines the current state of BackendTLSPolicy. + // +optional + Status PolicyStatus `json:"status,omitempty"` +} + +// BackendTLSPolicyList contains a list of BackendTLSPolicies +// +kubebuilder:object:root=true +type BackendTLSPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackendTLSPolicy `json:"items"` +} + +// BackendTLSPolicySpec defines the desired state of BackendTLSPolicy. +// +// Support: Extended +type BackendTLSPolicySpec struct { + // TargetRefs identifies an API object to apply the policy to. + // Only Services have Extended support. Implementations MAY support + // additional objects, with Implementation Specific support. + // Note that this config applies to the entire referenced resource + // by default, but this default may change in the future to provide + // a more granular application of the policy. + // + // TargetRefs must be _distinct_. This means either that: + // + // * They select different targets. 
If this is the case, then targetRef + // entries are distinct. In terms of fields, this means that the + // multi-part key defined by `group`, `kind`, and `name` must + // be unique across all targetRef entries in the BackendTLSPolicy. + // * They select different sectionNames in the same target. + // + // + // When more than one BackendTLSPolicy selects the same target and + // sectionName, implementations MUST determine precedence using the + // following criteria, continuing on ties: + // + // * The older policy by creation timestamp takes precedence. For + // example, a policy with a creation timestamp of "2021-07-15 + // 01:02:03" MUST be given precedence over a policy with a + // creation timestamp of "2021-07-15 01:02:04". + // * The policy appearing first in alphabetical order by {name}. + // For example, a policy named `bar` is given precedence over a + // policy named `baz`. + // + // For any BackendTLSPolicy that does not take precedence, the + // implementation MUST ensure the `Accepted` Condition is set to + // `status: False`, with Reason `Conflicted`. + // + // Implementations SHOULD NOT support more than one targetRef at this + // time. Although the API technically allows for this, the current guidance + // for conflict resolution and status handling is lacking. Until that can be + // clarified in a future release, the safest approach is to support a single + // targetRef. + // + // Support: Extended for Kubernetes Service + // + // Support: Implementation-specific for any other resource + // + // +required + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="sectionName must be specified when targetRefs includes 2 or more references to the same target",rule="self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name ? 
((!has(p1.sectionName) || p1.sectionName == '') == (!has(p2.sectionName) || p2.sectionName == '')) : true))" + // +kubebuilder:validation:XValidation:message="sectionName must be unique when targetRefs includes 2 or more references to the same target",rule="self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || (has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName))))" + TargetRefs []LocalPolicyTargetReferenceWithSectionName `json:"targetRefs"` + + // Validation contains backend TLS validation configuration. + // +required + Validation BackendTLSPolicyValidation `json:"validation"` + + // Options are a list of key/value pairs to enable extended TLS + // configuration for each implementation. For example, configuring the + // minimum TLS version or supported cipher suites. + // + // A set of common keys MAY be defined by the API in the future. To avoid + // any ambiguity, implementation-specific definitions MUST use + // domain-prefixed names, such as `example.com/my-custom-option`. + // Un-prefixed names are reserved for key names defined by Gateway API. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxProperties=16 + Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"` +} + +// BackendTLSPolicyValidation contains backend TLS validation configuration. 
+// +kubebuilder:validation:XValidation:message="must not contain both CACertificateRefs and WellKnownCACertificates",rule="!(has(self.caCertificateRefs) && size(self.caCertificateRefs) > 0 && has(self.wellKnownCACertificates) && self.wellKnownCACertificates != \"\")" +// +kubebuilder:validation:XValidation:message="must specify either CACertificateRefs or WellKnownCACertificates",rule="(has(self.caCertificateRefs) && size(self.caCertificateRefs) > 0 || has(self.wellKnownCACertificates) && self.wellKnownCACertificates != \"\")" +type BackendTLSPolicyValidation struct { + // CACertificateRefs contains one or more references to Kubernetes objects that + // contain a PEM-encoded TLS CA certificate bundle, which is used to + // validate a TLS handshake between the Gateway and backend Pod. + // + // If CACertificateRefs is empty or unspecified, then WellKnownCACertificates must be + // specified. Only one of CACertificateRefs or WellKnownCACertificates may be specified, + // not both. If CACertificateRefs is empty or unspecified, the configuration for + // WellKnownCACertificates MUST be honored instead if supported by the implementation. + // + // A CACertificateRef is invalid if: + // + // * It refers to a resource that cannot be resolved (e.g., the referenced resource + // does not exist) or is misconfigured (e.g., a ConfigMap does not contain a key + // named `ca.crt`). In this case, the Reason must be set to `InvalidCACertificateRef` + // and the Message of the Condition must indicate which reference is invalid and why. + // + // * It refers to an unknown or unsupported kind of resource. In this case, the Reason + // must be set to `InvalidKind` and the Message of the Condition must explain which + // kind of resource is unknown or unsupported. + // + // * It refers to a resource in another namespace. This may change in future + // spec updates. 
+ // + // Implementations MAY choose to perform further validation of the certificate + // content (e.g., checking expiry or enforcing specific formats). In such cases, + // an implementation-specific Reason and Message must be set for the invalid reference. + // + // In all cases, the implementation MUST ensure the `ResolvedRefs` Condition on + // the BackendTLSPolicy is set to `status: False`, with a Reason and Message + // that indicate the cause of the error. Connections using an invalid + // CACertificateRef MUST fail, and the client MUST receive an HTTP 5xx error + // response. If ALL CACertificateRefs are invalid, the implementation MUST also + // ensure the `Accepted` Condition on the BackendTLSPolicy is set to + // `status: False`, with a Reason `NoValidCACertificate`. + // + // + // A single CACertificateRef to a Kubernetes ConfigMap kind has "Core" support. + // Implementations MAY choose to support attaching multiple certificates to + // a backend, but this behavior is implementation-specific. + // + // Support: Core - An optional single reference to a Kubernetes ConfigMap, + // with the CA certificate in a key named `ca.crt`. + // + // Support: Implementation-specific - More than one reference, other kinds + // of resources, or a single reference that includes multiple certificates. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + CACertificateRefs []LocalObjectReference `json:"caCertificateRefs,omitempty"` + + // WellKnownCACertificates specifies whether system CA certificates may be used in + // the TLS handshake between the gateway and backend pod. + // + // If WellKnownCACertificates is unspecified or empty (""), then CACertificateRefs + // must be specified with at least one entry for a valid configuration. Only one of + // CACertificateRefs or WellKnownCACertificates may be specified, not both. 
+ // If an implementation does not support the WellKnownCACertificates field, or + // the supplied value is not recognized, the implementation MUST ensure the + // `Accepted` Condition on the BackendTLSPolicy is set to `status: False`, with + // a Reason `Invalid`. + // + // Support: Implementation-specific + // + // +optional + // +listType=atomic + WellKnownCACertificates *WellKnownCACertificatesType `json:"wellKnownCACertificates,omitempty"` + + // Hostname is used for two purposes in the connection between Gateways and + // backends: + // + // 1. Hostname MUST be used as the SNI to connect to the backend (RFC 6066). + // 2. Hostname MUST be used for authentication and MUST match the certificate + // served by the matching backend, unless SubjectAltNames is specified. + // 3. If SubjectAltNames are specified, Hostname can be used for certificate selection + // but MUST NOT be used for authentication. If you want to use the value + // of the Hostname field for authentication, you MUST add it to the SubjectAltNames list. + // + // Support: Core + // + // +required + Hostname PreciseHostname `json:"hostname"` + + // SubjectAltNames contains one or more Subject Alternative Names. + // When specified the certificate served from the backend MUST + // have at least one Subject Alternate Name matching one of the specified SubjectAltNames. + // + // Support: Extended + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=5 + SubjectAltNames []SubjectAltName `json:"subjectAltNames,omitempty"` +} + +// SubjectAltName represents Subject Alternative Name. 
+// +kubebuilder:validation:XValidation:message="SubjectAltName element must contain Hostname, if Type is set to Hostname",rule="!(self.type == \"Hostname\" && (!has(self.hostname) || self.hostname == \"\"))" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must not contain Hostname, if Type is not set to Hostname",rule="!(self.type != \"Hostname\" && has(self.hostname) && self.hostname != \"\")" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must contain URI, if Type is set to URI",rule="!(self.type == \"URI\" && (!has(self.uri) || self.uri == \"\"))" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must not contain URI, if Type is not set to URI",rule="!(self.type != \"URI\" && has(self.uri) && self.uri != \"\")" +type SubjectAltName struct { + // Type determines the format of the Subject Alternative Name. Always required. + // + // Support: Core + // + // +required + Type SubjectAltNameType `json:"type"` + + // Hostname contains Subject Alternative Name specified in DNS name format. + // Required when Type is set to Hostname, ignored otherwise. + // + // Support: Core + // + // +optional + Hostname Hostname `json:"hostname,omitempty"` + + // URI contains Subject Alternative Name specified in a full URI format. + // It MUST include both a scheme (e.g., "http" or "ftp") and a scheme-specific-part. + // Common values include SPIFFE IDs like "spiffe://mycluster.example.com/ns/myns/sa/svc1sa". + // Required when Type is set to URI, ignored otherwise. + // + // Support: Core + // + // +optional + URI AbsoluteURI `json:"uri,omitempty"` +} + +// WellKnownCACertificatesType is the type of CA certificate that will be used +// when the caCertificateRefs field is unspecified. +// +kubebuilder:validation:Enum=System +type WellKnownCACertificatesType string + +const ( + // WellKnownCACertificatesSystem indicates that well known system CA certificates should be used. 
+ WellKnownCACertificatesSystem WellKnownCACertificatesType = "System" +) + +// SubjectAltNameType is the type of the Subject Alternative Name. +// +kubebuilder:validation:Enum=Hostname;URI +type SubjectAltNameType string + +const ( + // HostnameSubjectAltNameType specifies hostname-based SAN. + // + // Support: Core + HostnameSubjectAltNameType SubjectAltNameType = "Hostname" + + // URISubjectAltNameType specifies URI-based SAN, e.g. SPIFFE id. + // + // Support: Core + URISubjectAltNameType SubjectAltNameType = "URI" +) + +const ( + // This reason is used with the "Accepted" condition when it is + // set to false because all CACertificateRefs of the + // BackendTLSPolicy are invalid. + BackendTLSPolicyReasonNoValidCACertificate PolicyConditionReason = "NoValidCACertificate" +) + +const ( + // This condition indicates whether the controller was able to resolve all + // object references for the BackendTLSPolicy. + // + // Possible reasons for this condition to be True are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidCACertificateRef" + // * "InvalidKind" + // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. + BackendTLSPolicyConditionResolvedRefs PolicyConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. + BackendTLSPolicyReasonResolvedRefs PolicyConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when one of the + // BackendTLSPolicy's CACertificateRefs is invalid. + // A CACertificateRef is considered invalid when it refers to a nonexistent + // resource or when the data within that resource is malformed. 
+ BackendTLSPolicyReasonInvalidCACertificateRef PolicyConditionReason = "InvalidCACertificateRef" + + // This reason is used with the "ResolvedRefs" condition when one of the + // BackendTLSPolicy's CACertificateRefs references an unknown or unsupported + // Group and/or Kind. + BackendTLSPolicyReasonInvalidKind PolicyConditionReason = "InvalidKind" +) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/doc.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/doc.go new file mode 100644 index 00000000..f2c7aa2b --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the gateway.networking.k8s.io +// API group. +// +// +k8s:openapi-gen=true +// +kubebuilder:object:generate=true +// +groupName=gateway.networking.k8s.io +package v1 diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go new file mode 100644 index 00000000..58d97518 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go @@ -0,0 +1,1640 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api,shortName=gtw +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Class",type=string,JSONPath=`.spec.gatewayClassName` +// +kubebuilder:printcolumn:name="Address",type=string,JSONPath=`.status.addresses[*].value` +// +kubebuilder:printcolumn:name="Programmed",type=string,JSONPath=`.status.conditions[?(@.type=="Programmed")].status` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// Gateway represents an instance of a service-traffic handling infrastructure +// by binding Listeners to a set of IP addresses. +type Gateway struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of Gateway. + // +required + Spec GatewaySpec `json:"spec"` + + // Status defines the current state of Gateway. + // + // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type: "Programmed", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +optional + Status GatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayList contains a list of Gateways. 
+type GatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Gateway `json:"items"` +} + +// GatewaySpec defines the desired state of Gateway. +// +// Not all possible combinations of options specified in the Spec are +// valid. Some invalid configurations can be caught synchronously via CRD +// validation, but there are many cases that will require asynchronous +// signaling via the GatewayStatus block. +type GatewaySpec struct { + // GatewayClassName used for this Gateway. This is the name of a + // GatewayClass resource. + // +required + GatewayClassName ObjectName `json:"gatewayClassName"` + + // Listeners associated with this Gateway. Listeners define + // logical endpoints that are bound on this Gateway's addresses. + // At least one Listener MUST be specified. + // + // ## Distinct Listeners + // + // Each Listener in a set of Listeners (for example, in a single Gateway) + // MUST be _distinct_, in that a traffic flow MUST be able to be assigned to + // exactly one listener. (This section uses "set of Listeners" rather than + // "Listeners in a single Gateway" because implementations MAY merge configuration + // from multiple Gateways onto a single data plane, and these rules _also_ + // apply in that case). + // + // Practically, this means that each listener in a set MUST have a unique + // combination of Port, Protocol, and, if supported by the protocol, Hostname. + // + // Some combinations of port, protocol, and TLS settings are considered + // Core support and MUST be supported by implementations based on the objects + // they support: + // + // HTTPRoute + // + // 1. HTTPRoute, Port: 80, Protocol: HTTP + // 2. HTTPRoute, Port: 443, Protocol: HTTPS, TLS Mode: Terminate, TLS keypair provided + // + // TLSRoute + // + // 1. 
TLSRoute, Port: 443, Protocol: TLS, TLS Mode: Passthrough + // + // "Distinct" Listeners have the following property: + // + // **The implementation can match inbound requests to a single distinct + // Listener**. + // + // When multiple Listeners share values for fields (for + // example, two Listeners with the same Port value), the implementation + // can match requests to only one of the Listeners using other + // Listener fields. + // + // When multiple listeners have the same value for the Protocol field, then + // each of the Listeners with matching Protocol values MUST have different + // values for other fields. + // + // The set of fields that MUST be different for a Listener differs per protocol. + // The following rules define the rules for what fields MUST be considered for + // Listeners to be distinct with each protocol currently defined in the + // Gateway API spec. + // + // The set of listeners that all share a protocol value MUST have _different_ + // values for _at least one_ of these fields to be distinct: + // + // * **HTTP, HTTPS, TLS**: Port, Hostname + // * **TCP, UDP**: Port + // + // One **very** important rule to call out involves what happens when an + // implementation: + // + // * Supports TCP protocol Listeners, as well as HTTP, HTTPS, or TLS protocol + // Listeners, and + // * sees HTTP, HTTPS, or TLS protocols with the same `port` as one with TCP + // Protocol. + // + // In this case all the Listeners that share a port with the + // TCP Listener are not distinct and so MUST NOT be accepted. + // + // If an implementation does not support TCP Protocol Listeners, then the + // previous rule does not apply, and the TCP Listeners SHOULD NOT be + // accepted. + // + // Note that the `tls` field is not used for determining if a listener is distinct, because + // Listeners that _only_ differ on TLS config will still conflict in all cases. 
+ // + // ### Listeners that are distinct only by Hostname + // + // When the Listeners are distinct based only on Hostname, inbound request + // hostnames MUST match from the most specific to least specific Hostname + // values to choose the correct Listener and its associated set of Routes. + // + // Exact matches MUST be processed before wildcard matches, and wildcard + // matches MUST be processed before fallback (empty Hostname value) + // matches. For example, `"foo.example.com"` takes precedence over + // `"*.example.com"`, and `"*.example.com"` takes precedence over `""`. + // + // Additionally, if there are multiple wildcard entries, more specific + // wildcard entries must be processed before less specific wildcard entries. + // For example, `"*.foo.example.com"` takes precedence over `"*.example.com"`. + // + // The precise definition here is that the higher the number of dots in the + // hostname to the right of the wildcard character, the higher the precedence. + // + // The wildcard character will match any number of characters _and dots_ to + // the left, however, so `"*.example.com"` will match both + // `"foo.bar.example.com"` _and_ `"bar.example.com"`. + // + // ## Handling indistinct Listeners + // + // If a set of Listeners contains Listeners that are not distinct, then those + // Listeners are _Conflicted_, and the implementation MUST set the "Conflicted" + // condition in the Listener Status to "True". + // + // The words "indistinct" and "conflicted" are considered equivalent for the + // purpose of this documentation. + // + // Implementations MAY choose to accept a Gateway with some Conflicted + // Listeners only if they only accept the partial Listener set that contains + // no Conflicted Listeners. + // + // Specifically, an implementation MAY accept a partial Listener set subject to + // the following rules: + // + // * The implementation MUST NOT pick one conflicting Listener as the winner. 
+ // ALL indistinct Listeners must not be accepted for processing. + // * At least one distinct Listener MUST be present, or else the Gateway effectively + // contains _no_ Listeners, and must be rejected from processing as a whole. + // + // The implementation MUST set a "ListenersNotValid" condition on the + // Gateway Status when the Gateway contains Conflicted Listeners whether or + // not they accept the Gateway. That Condition SHOULD clearly + // indicate in the Message which Listeners are conflicted, and which are + // Accepted. Additionally, the Listener status for those listeners SHOULD + // indicate which Listeners are conflicted and not Accepted. + // + // ## General Listener behavior + // + // Note that, for all distinct Listeners, requests SHOULD match at most one Listener. + // For example, if Listeners are defined for "foo.example.com" and "*.example.com", a + // request to "foo.example.com" SHOULD only be routed using routes attached + // to the "foo.example.com" Listener (and not the "*.example.com" Listener). + // + // This concept is known as "Listener Isolation", and it is an Extended feature + // of Gateway API. Implementations that do not support Listener Isolation MUST + // clearly document this, and MUST NOT claim support for the + // `GatewayHTTPListenerIsolation` feature. + // + // Implementations that _do_ support Listener Isolation SHOULD claim support + // for the Extended `GatewayHTTPListenerIsolation` feature and pass the associated + // conformance tests. + // + // ## Compatible Listeners + // + // A Gateway's Listeners are considered _compatible_ if: + // + // 1. They are distinct. + // 2. The implementation can serve them in compliance with the Addresses + // requirement that all Listeners are available on all assigned + // addresses. + // + // Compatible combinations in Extended support are expected to vary across + // implementations. A combination that is compatible for one implementation + // may not be compatible for another. 
+ // + // For example, an implementation that cannot serve both TCP and UDP listeners + // on the same address, or cannot mix HTTPS and generic TLS listens on the same port + // would not consider those cases compatible, even though they are distinct. + // + // Implementations MAY merge separate Gateways onto a single set of + // Addresses if all Listeners across all Gateways are compatible. + // + // In a future release the MinItems=1 requirement MAY be dropped. + // + // Support: Core + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="tls must not be specified for protocols ['HTTP', 'TCP', 'UDP']",rule="self.all(l, l.protocol in ['HTTP', 'TCP', 'UDP'] ? !has(l.tls) : true)" + // +kubebuilder:validation:XValidation:message="tls mode must be Terminate for protocol HTTPS",rule="self.all(l, (l.protocol == 'HTTPS' && has(l.tls)) ? (l.tls.mode == '' || l.tls.mode == 'Terminate') : true)" + // +kubebuilder:validation:XValidation:message="hostname must not be specified for protocols ['TCP', 'UDP']",rule="self.all(l, l.protocol in ['TCP', 'UDP'] ? (!has(l.hostname) || l.hostname == '') : true)" + // +kubebuilder:validation:XValidation:message="Listener name must be unique within the Gateway",rule="self.all(l1, self.exists_one(l2, l1.name == l2.name))" + // +kubebuilder:validation:XValidation:message="Combination of port, protocol and hostname must be unique for each listener",rule="self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))" + // +required + Listeners []Listener `json:"listeners"` + + // Addresses requested for this Gateway. This is optional and behavior can + // depend on the implementation. 
If a value is set in the spec and the + // requested address is invalid or unavailable, the implementation MUST + // indicate this in an associated entry in GatewayStatus.Conditions. + // + // The Addresses field represents a request for the address(es) on the + // "outside of the Gateway", that traffic bound for this Gateway will use. + // This could be the IP address or hostname of an external load balancer or + // other networking infrastructure, or some other address that traffic will + // be sent to. + // + // If no Addresses are specified, the implementation MAY schedule the + // Gateway in an implementation-specific manner, assigning an appropriate + // set of Addresses. + // + // The implementation MUST bind all Listeners to every GatewayAddress that + // it assigns to the Gateway and add a corresponding entry in + // GatewayStatus.Addresses. + // + // Support: Extended + // + // +optional + // +listType=atomic + // + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' && has(a1.value) ? self.exists_one(a2, a2.type == a1.type && has(a2.value) && a2.value == a1.value) : true )" + // +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' && has(a1.value) ? self.exists_one(a2, a2.type == a1.type && has(a2.value) && a2.value == a1.value) : true )" + Addresses []GatewaySpecAddress `json:"addresses,omitempty"` + + // Infrastructure defines infrastructure level attributes about this Gateway instance. + // + // Support: Extended + // + // +optional + Infrastructure *GatewayInfrastructure `json:"infrastructure,omitempty"` + + // AllowedListeners defines which ListenerSets can be attached to this Gateway. + // While this feature is experimental, the default value is to allow no ListenerSets. 
+ // + // + // + // +optional + AllowedListeners *AllowedListeners `json:"allowedListeners,omitempty"` + // + // TLS specifies frontend and backend tls configuration for entire gateway. + // + // Support: Extended + // + // +optional + // + TLS *GatewayTLSConfig `json:"tls,omitempty"` + + // DefaultScope, when set, configures the Gateway as a default Gateway, + // meaning it will dynamically and implicitly have Routes (e.g. HTTPRoute) + // attached to it, according to the scope configured here. + // + // If unset (the default) or set to None, the Gateway will not act as a + // default Gateway; if set, the Gateway will claim any Route with a + // matching scope set in its UseDefaultGateway field, subject to the usual + // rules about which routes the Gateway can attach to. + // + // Think carefully before using this functionality! While the normal rules + // about which Route can apply are still enforced, it is simply easier for + // the wrong Route to be accidentally attached to this Gateway in this + // configuration. If the Gateway operator is not also the operator in + // control of the scope (e.g. namespace) with tight controls and checks on + // what kind of workloads and Routes get added in that scope, we strongly + // recommend not using this just because it seems convenient, and instead + // stick to direct Route attachment. + // + // +optional + // + DefaultScope GatewayDefaultScope `json:"defaultScope,omitempty"` +} + +// AllowedListeners defines which ListenerSets can be attached to this Gateway. +type AllowedListeners struct { + // Namespaces defines which namespaces ListenerSets can be attached to this Gateway. + // While this feature is experimental, the default value is to allow no ListenerSets. + // + // +optional + // +kubebuilder:default={from: None} + Namespaces *ListenerNamespaces `json:"namespaces,omitempty"` +} + +// ListenerNamespaces indicate which namespaces ListenerSets should be selected from. 
+type ListenerNamespaces struct { + // From indicates where ListenerSets can attach to this Gateway. Possible + // values are: + // + // * Same: Only ListenerSets in the same namespace may be attached to this Gateway. + // * Selector: ListenerSets in namespaces selected by the selector may be attached to this Gateway. + // * All: ListenerSets in all namespaces may be attached to this Gateway. + // * None: Only listeners defined in the Gateway's spec are allowed + // + // While this feature is experimental, the default value None + // + // +optional + // +kubebuilder:default=None + // +kubebuilder:validation:Enum=All;Selector;Same;None + From *FromNamespaces `json:"from,omitempty"` + + // Selector must be specified when From is set to "Selector". In that case, + // only ListenerSets in Namespaces matching this Selector will be selected by this + // Gateway. This field is ignored for other values of "From". + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` +} + +// Listener embodies the concept of a logical endpoint where a Gateway accepts +// network connections. +type Listener struct { + // Name is the name of the Listener. This name MUST be unique within a + // Gateway. + // + // Support: Core + // +required + Name SectionName `json:"name"` + + // Hostname specifies the virtual hostname to match for protocol types that + // define this concept. When unspecified, all hostnames are matched. This + // field is ignored for protocols that don't require hostname based + // matching. + // + // Implementations MUST apply Hostname matching appropriately for each of + // the following protocols: + // + // * TLS: The Listener Hostname MUST match the SNI. + // * HTTP: The Listener Hostname MUST match the Host header of the request. + // * HTTPS: The Listener Hostname SHOULD match both the SNI and Host header. + // Note that this does not require the SNI and Host header to be the same. 
+ // The semantics of this are described in more detail below. + // + // To ensure security, Section 11.1 of RFC-6066 emphasizes that server + // implementations that rely on SNI hostname matching MUST also verify + // hostnames within the application protocol. + // + // Section 9.1.2 of RFC-7540 provides a mechanism for servers to reject the + // reuse of a connection by responding with the HTTP 421 Misdirected Request + // status code. This indicates that the origin server has rejected the + // request because it appears to have been misdirected. + // + // To detect misdirected requests, Gateways SHOULD match the authority of + // the requests with all the SNI hostname(s) configured across all the + // Gateway Listeners on the same port and protocol: + // + // * If another Listener has an exact match or more specific wildcard entry, + // the Gateway SHOULD return a 421. + // * If the current Listener (selected by SNI matching during ClientHello) + // does not match the Host: + // * If another Listener does match the Host the Gateway SHOULD return a + // 421. + // * If no other Listener matches the Host, the Gateway MUST return a + // 404. + // + // For HTTPRoute and TLSRoute resources, there is an interaction with the + // `spec.hostnames` array. When both listener and route specify hostnames, + // there MUST be an intersection between the values for a Route to be + // accepted. For more information, refer to the Route specific Hostnames + // documentation. + // + // Hostnames that are prefixed with a wildcard label (`*.`) are interpreted + // as a suffix match. That means that a match for `*.example.com` would match + // both `test.example.com`, and `foo.test.example.com`, but not `example.com`. + // + // Support: Core + // + // +optional + Hostname *Hostname `json:"hostname,omitempty"` + + // Port is the network port. Multiple listeners may use the + // same port, subject to the Listener compatibility rules. 
+ // + // Support: Core + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // + // +required + Port PortNumber `json:"port"` + + // Protocol specifies the network protocol this listener expects to receive. + // + // Support: Core + // +required + Protocol ProtocolType `json:"protocol"` + + // TLS is the TLS configuration for the Listener. This field is required if + // the Protocol field is "HTTPS" or "TLS". It is invalid to set this field + // if the Protocol field is "HTTP", "TCP", or "UDP". + // + // The association of SNIs to Certificate defined in ListenerTLSConfig is + // defined based on the Hostname field for this listener. + // + // The GatewayClass MUST use the longest matching SNI out of all + // available certificates for any TLS handshake. + // + // Support: Core + // + // +optional + TLS *ListenerTLSConfig `json:"tls,omitempty"` + + // AllowedRoutes defines the types of routes that MAY be attached to a + // Listener and the trusted namespaces where those Route resources MAY be + // present. + // + // Although a client request may match multiple route rules, only one rule + // may ultimately receive the request. Matching precedence MUST be + // determined in order of the following criteria: + // + // * The most specific match as defined by the Route type. + // * The oldest Route based on creation timestamp. For example, a Route with + // a creation timestamp of "2020-09-08 01:02:03" is given precedence over + // a Route with a creation timestamp of "2020-09-08 01:02:04". + // * If everything else is equivalent, the Route appearing first in + // alphabetical order (namespace/name) should be given precedence. For + // example, foo/bar is given precedence over foo/baz. + // + // All valid rules within a Route attached to this Listener should be + // implemented. Invalid Route rules can be ignored (sometimes that will mean + // the full Route). 
If a Route rule transitions from valid to invalid, + // support for that Route rule should be dropped to ensure consistency. For + // example, even if a filter specified by a Route rule is invalid, the rest + // of the rules within that Route should still be supported. + // + // Support: Core + // +kubebuilder:default={namespaces:{from: Same}} + // +optional + AllowedRoutes *AllowedRoutes `json:"allowedRoutes,omitempty"` +} + +// ProtocolType defines the application protocol accepted by a Listener. +// Implementations are not required to accept all the defined protocols. If an +// implementation does not support a specified protocol, it MUST set the +// "Accepted" condition to False for the affected Listener with a reason of +// "UnsupportedProtocol". +// +// Core ProtocolType values are listed in the table below. +// +// Implementations can define their own protocols if a core ProtocolType does not +// exist. Such definitions must use prefixed name, such as +// `mycompany.com/my-custom-protocol`. Un-prefixed names are reserved for core +// protocols. Any protocol defined by implementations will fall under +// Implementation-specific conformance. +// +// Valid values include: +// +// * "HTTP" - Core support +// * "example.com/bar" - Implementation-specific support +// +// Invalid values include: +// +// * "example.com" - must include path if domain is used +// * "foo.example.com" - must include path if domain is used +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=255 +// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])?$|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9]+$` +type ProtocolType string + +const ( + // Accepts cleartext HTTP/1.1 sessions over TCP. Implementations MAY also + // support HTTP/2 over cleartext. If implementations support HTTP/2 over + // cleartext on "HTTP" listeners, that MUST be clearly documented by the + // implementation. 
+ HTTPProtocolType ProtocolType = "HTTP" + + // Accepts HTTP/1.1 or HTTP/2 sessions over TLS. + HTTPSProtocolType ProtocolType = "HTTPS" + + // Accepts TLS sessions over TCP. + TLSProtocolType ProtocolType = "TLS" + + // Accepts TCP sessions. + TCPProtocolType ProtocolType = "TCP" + + // Accepts UDP packets. + UDPProtocolType ProtocolType = "UDP" +) + +// GatewayBackendTLS describes backend TLS configuration for gateway. +type GatewayBackendTLS struct { + // ClientCertificateRef is a reference to an object that contains a Client + // Certificate and the associated private key. + // + // References to a resource in different namespace are invalid UNLESS there + // is a ReferenceGrant in the target namespace that allows the certificate + // to be attached. If a ReferenceGrant does not allow this reference, the + // "ResolvedRefs" condition MUST be set to False for this listener with the + // "RefNotPermitted" reason. + // + // ClientCertificateRef can reference to standard Kubernetes resources, i.e. + // Secret, or implementation-specific custom resources. + // + // Support: Core + // + // +optional + // + ClientCertificateRef *SecretObjectReference `json:"clientCertificateRef,omitempty"` +} + +// ListenerTLSConfig describes a TLS configuration for a listener. +// +// +kubebuilder:validation:XValidation:message="certificateRefs or options must be specified when mode is Terminate",rule="self.mode == 'Terminate' ? size(self.certificateRefs) > 0 || size(self.options) > 0 : true" +type ListenerTLSConfig struct { + // Mode defines the TLS behavior for the TLS session initiated by the client. + // There are two possible modes: + // + // - Terminate: The TLS session between the downstream client and the + // Gateway is terminated at the Gateway. This mode requires certificates + // to be specified in some way, such as populating the certificateRefs + // field. + // - Passthrough: The TLS session is NOT terminated by the Gateway. 
This + // implies that the Gateway can't decipher the TLS stream except for + // the ClientHello message of the TLS protocol. The certificateRefs field + // is ignored in this mode. + // + // Support: Core + // + // +optional + // +kubebuilder:default=Terminate + Mode *TLSModeType `json:"mode,omitempty"` + + // CertificateRefs contains a series of references to Kubernetes objects that + // contains TLS certificates and private keys. These certificates are used to + // establish a TLS handshake for requests that match the hostname of the + // associated listener. + // + // A single CertificateRef to a Kubernetes Secret has "Core" support. + // Implementations MAY choose to support attaching multiple certificates to + // a Listener, but this behavior is implementation-specific. + // + // References to a resource in different namespace are invalid UNLESS there + // is a ReferenceGrant in the target namespace that allows the certificate + // to be attached. If a ReferenceGrant does not allow this reference, the + // "ResolvedRefs" condition MUST be set to False for this listener with the + // "RefNotPermitted" reason. + // + // This field is required to have at least one element when the mode is set + // to "Terminate" (default) and is optional otherwise. + // + // CertificateRefs can reference to standard Kubernetes resources, i.e. + // Secret, or implementation-specific custom resources. + // + // Support: Core - A single reference to a Kubernetes Secret of type kubernetes.io/tls + // + // Support: Implementation-specific (More than one reference or other resource types) + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 + CertificateRefs []SecretObjectReference `json:"certificateRefs,omitempty"` + + // Options are a list of key/value pairs to enable extended TLS + // configuration for each implementation. For example, configuring the + // minimum TLS version or supported cipher suites. 
+ // + // A set of common keys MAY be defined by the API in the future. To avoid + // any ambiguity, implementation-specific definitions MUST use + // domain-prefixed names, such as `example.com/my-custom-option`. + // Un-prefixed names are reserved for key names defined by Gateway API. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxProperties=16 + Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"` +} + +// GatewayTLSConfig specifies frontend and backend tls configuration for gateway. +type GatewayTLSConfig struct { + // Backend describes TLS configuration for gateway when connecting + // to backends. + // + // Note that this contains only details for the Gateway as a TLS client, + // and does _not_ imply behavior about how to choose which backend should + // get a TLS connection. That is determined by the presence of a BackendTLSPolicy. + // + // Support: Core + // + // +optional + // + Backend *GatewayBackendTLS `json:"backend,omitempty"` + + // Frontend describes TLS config when client connects to Gateway. + // Support: Core + // + // +optional + // + Frontend *FrontendTLSConfig `json:"frontend,omitempty"` +} + +// FrontendTLSConfig specifies frontend tls configuration for gateway. +type FrontendTLSConfig struct { + // Default specifies the default client certificate validation configuration + // for all Listeners handling HTTPS traffic, unless a per-port configuration + // is defined. + // + // support: Core + // + // +required + // + Default TLSConfig `json:"default"` + + // PerPort specifies tls configuration assigned per port. + // Per port configuration is optional. Once set this configuration overrides + // the default configuration for all Listeners handling HTTPS traffic + // that match this port. + // Each override port requires a unique TLS configuration. 
+ // + // support: Core + // + // +optional + // +listType=map + // +listMapKey=port + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="Port for TLS configuration must be unique within the Gateway",rule="self.all(t1, self.exists_one(t2, t1.port == t2.port))" + // + PerPort []TLSPortConfig `json:"perPort,omitempty"` +} + +// TLSModeType type defines how a Gateway handles TLS sessions. +// +// +kubebuilder:validation:Enum=Terminate;Passthrough +type TLSModeType string + +const ( + // In this mode, TLS session between the downstream client + // and the Gateway is terminated at the Gateway. + TLSModeTerminate TLSModeType = "Terminate" + + // In this mode, the TLS session is NOT terminated by the Gateway. This + // implies that the Gateway can't decipher the TLS stream except for + // the ClientHello message of the TLS protocol. + // + // Note that SSL passthrough is only supported by TLSRoute. + TLSModePassthrough TLSModeType = "Passthrough" +) + +// TLSConfig describes TLS configuration that can apply to multiple Listeners +// within this Gateway. Currently, it stores only the client certificate validation +// configuration, but this may be extended in the future. +type TLSConfig struct { + // Validation holds configuration information for validating the frontend (client). + // Setting this field will result in mutual authentication when connecting to the gateway. + // In browsers this may result in a dialog appearing + // that requests a user to specify the client certificate. + // The maximum depth of a certificate chain accepted in verification is Implementation specific. + // + // Support: Core + // + // +optional + // + Validation *FrontendTLSValidation `json:"validation,omitempty"` +} + +type TLSPortConfig struct { + // The Port indicates the Port Number to which the TLS configuration will be + // applied. This configuration will be applied to all Listeners handling HTTPS + // traffic that match this port. 
+ // + // Support: Core + // + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // + Port PortNumber `json:"port"` + + // TLS store the configuration that will be applied to all Listeners handling + // HTTPS traffic and matching given port. + // + // Support: Core + // + // +required + // + TLS TLSConfig `json:"tls"` +} + +// FrontendTLSValidation holds configuration information that can be used to validate +// the frontend initiating the TLS connection +type FrontendTLSValidation struct { + // CACertificateRefs contains one or more references to + // Kubernetes objects that contain TLS certificates of + // the Certificate Authorities that can be used + // as a trust anchor to validate the certificates presented by the client. + // + // A single CA certificate reference to a Kubernetes ConfigMap + // has "Core" support. + // Implementations MAY choose to support attaching multiple CA certificates to + // a Listener, but this behavior is implementation-specific. + // + // Support: Core - A single reference to a Kubernetes ConfigMap + // with the CA certificate in a key named `ca.crt`. + // + // Support: Implementation-specific (More than one certificate in a ConfigMap + // with different keys or more than one reference, or other kinds of resources). + // + // References to a resource in a different namespace are invalid UNLESS there + // is a ReferenceGrant in the target namespace that allows the certificate + // to be attached. If a ReferenceGrant does not allow this reference, the + // "ResolvedRefs" condition MUST be set to False for this listener with the + // "RefNotPermitted" reason. + // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + CACertificateRefs []ObjectReference `json:"caCertificateRefs"` + + // FrontendValidationMode defines the mode for validating the client certificate. 
+ // There are two possible modes:
+ //
+ // - AllowValidOnly: In this mode, the gateway will accept connections only if
+ // the client presents a valid certificate. This certificate must successfully
+ // pass validation against the CA certificates specified in `CACertificateRefs`.
+ // - AllowInsecureFallback: In this mode, the gateway will accept connections
+ // even if the client certificate is not presented or fails verification.
+ //
+ // This approach delegates client authorization to the backend and introduces
+ // a significant security risk. It should be used in testing environments or
+ // on a temporary basis in non-testing environments.
+ //
+ // Defaults to AllowValidOnly.
+ //
+ // Support: Core
+ //
+ // +optional
+ // +kubebuilder:default=AllowValidOnly
+ Mode FrontendValidationModeType `json:"mode,omitempty"`
+}
+
+// FrontendValidationModeType type defines how a Gateway validates client certificates.
+//
+// +kubebuilder:validation:Enum=AllowValidOnly;AllowInsecureFallback
+type FrontendValidationModeType string
+
+const (
+ // AllowValidOnly indicates that a client certificate is required
+ // during the TLS handshake and MUST pass validation.
+ //
+ // Support: Core
+ AllowValidOnly FrontendValidationModeType = "AllowValidOnly"
+
+ // AllowInsecureFallback indicates that a client certificate may not be
+ // presented during the handshake or the validation against CA certificates may fail.
+ //
+ // Support: Extended
+ AllowInsecureFallback FrontendValidationModeType = "AllowInsecureFallback"
+)
+
+// AllowedRoutes defines which Routes may be attached to this Listener.
+type AllowedRoutes struct {
+ // Namespaces indicates namespaces from which Routes may be attached to this
+ // Listener. This is restricted to the namespace of this Gateway by default.
+ // + // Support: Core + // + // +optional + // +listType=atomic + // +kubebuilder:default={from: Same} + Namespaces *RouteNamespaces `json:"namespaces,omitempty"` + + // Kinds specifies the groups and kinds of Routes that are allowed to bind + // to this Gateway Listener. When unspecified or empty, the kinds of Routes + // selected are determined using the Listener protocol. + // + // A RouteGroupKind MUST correspond to kinds of Routes that are compatible + // with the application protocol specified in the Listener's Protocol field. + // If an implementation does not support or recognize this resource type, it + // MUST set the "ResolvedRefs" condition to False for this Listener with the + // "InvalidRouteKinds" reason. + // + // Support: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + Kinds []RouteGroupKind `json:"kinds,omitempty"` +} + +// FromNamespaces specifies namespace from which Routes/ListenerSets may be attached to a +// Gateway. +type FromNamespaces string + +const ( + // Routes/ListenerSets in all namespaces may be attached to this Gateway. + NamespacesFromAll FromNamespaces = "All" + // Only Routes/ListenerSets in namespaces selected by the selector may be attached to + // this Gateway. + NamespacesFromSelector FromNamespaces = "Selector" + // Only Routes/ListenerSets in the same namespace as the Gateway may be attached to this + // Gateway. + NamespacesFromSame FromNamespaces = "Same" + // No Routes/ListenerSets may be attached to this Gateway. + NamespacesFromNone FromNamespaces = "None" +) + +// RouteNamespaces indicate which namespaces Routes should be selected from. +type RouteNamespaces struct { + // From indicates where Routes will be selected for this Gateway. Possible + // values are: + // + // * All: Routes in all namespaces may be used by this Gateway. + // * Selector: Routes in namespaces selected by the selector may be used by + // this Gateway. 
+ // * Same: Only Routes in the same namespace may be used by this Gateway. + // + // Support: Core + // + // +optional + // +kubebuilder:default=Same + // +kubebuilder:validation:Enum=All;Selector;Same + From *FromNamespaces `json:"from,omitempty"` + + // Selector must be specified when From is set to "Selector". In that case, + // only Routes in Namespaces matching this Selector will be selected by this + // Gateway. This field is ignored for other values of "From". + // + // Support: Core + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` +} + +// RouteGroupKind indicates the group and kind of a Route resource. +type RouteGroupKind struct { + // Group is the group of the Route. + // + // +optional + // +kubebuilder:default=gateway.networking.k8s.io + Group *Group `json:"group,omitempty"` + + // Kind is the kind of the Route. + // +required + Kind Kind `json:"kind"` +} + +// GatewaySpecAddress describes an address that can be bound to a Gateway. +// +// +kubebuilder:validation:XValidation:message="Hostname value must be empty or contain only valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? (!has(self.value) || self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\")): true" +type GatewaySpecAddress struct { + // Type of the address. + // + // +optional + // +kubebuilder:default=IPAddress + Type *AddressType `json:"type,omitempty"` + + // When a value is unspecified, an implementation SHOULD automatically + // assign an address matching the requested type if possible. + // + // If an implementation does not support an empty value, they MUST set the + // "Programmed" condition in status to False with a reason of "AddressNotAssigned". + // + // Examples: `1.2.3.4`, `128::1`, `my-ip-address`. 
+ // + // +optional + // +kubebuilder:validation:MaxLength=253 + Value string `json:"value,omitempty"` +} + +// GatewayStatusAddress describes a network address that is bound to a Gateway. +// +// +kubebuilder:validation:XValidation:message="Hostname value must only contain valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\"): true" +type GatewayStatusAddress struct { + // Type of the address. + // + // +optional + // +kubebuilder:default=IPAddress + Type *AddressType `json:"type,omitempty"` + + // Value of the address. The validity of the values will depend + // on the type and support by the controller. + // + // Examples: `1.2.3.4`, `128::1`, `my-ip-address`. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Value string `json:"value"` +} + +// GatewayStatus defines the observed state of Gateway. +type GatewayStatus struct { + // Addresses lists the network addresses that have been bound to the + // Gateway. + // + // This list may differ from the addresses provided in the spec under some + // conditions: + // + // * no addresses are specified, all addresses are dynamically assigned + // * a combination of specified and dynamic addresses are assigned + // * a specified address was unusable (e.g. already in use) + // + // +optional + // +listType=atomic + // + // +kubebuilder:validation:MaxItems=16 + Addresses []GatewayStatusAddress `json:"addresses,omitempty"` + + // Conditions describe the current conditions of the Gateway. + // + // Implementations should prefer to express Gateway conditions + // using the `GatewayConditionType` and `GatewayConditionReason` + // constants so that operators and tools can converge on a common + // vocabulary to describe Gateway state. 
+ // + // Known condition types are: + // + // * "Accepted" + // * "Programmed" + // * "Ready" + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) 
+ //
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ // +kubebuilder:validation:MaxItems=8
+ // +kubebuilder:default={{type: "Accepted", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type: "Programmed", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // Listeners provide status for each unique listener port defined in the Spec.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MaxItems=64
+ Listeners []ListenerStatus `json:"listeners,omitempty"`
+}
+
+// GatewayInfrastructure defines infrastructure level attributes about a Gateway instance.
+type GatewayInfrastructure struct {
+ // Labels that SHOULD be applied to any resources created in response to this Gateway.
+ //
+ // For implementations creating other Kubernetes objects, this should be the `metadata.labels` field on resources.
+ // For other implementations, this refers to any relevant (implementation specific) "labels" concepts.
+ //
+ // An implementation may choose to add additional implementation-specific labels as they see fit.
+ //
+ // If an implementation maps these labels to Pods, or any other resource that would need to be recreated when labels
+ // change, it SHOULD clearly warn about this behavior in documentation.
+ //
+ // Support: Extended
+ //
+ // +optional
+ // +kubebuilder:validation:MaxProperties=8
+ // +kubebuilder:validation:XValidation:message="Label keys must be in the form of an optional DNS subdomain prefix followed by a required name segment of up to 63 characters.",rule="self.all(key, key.matches(r\"\"\"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$\"\"\"))"
+ // +kubebuilder:validation:XValidation:message="If specified, the label key's prefix must be a DNS subdomain not longer than 253 characters in total.",rule="self.all(key, key.split(\"/\")[0].size() < 253)"
+ Labels map[LabelKey]LabelValue `json:"labels,omitempty"`
+
+ // Annotations that SHOULD be applied to any resources created in response to this Gateway.
+ //
+ // For implementations creating other Kubernetes objects, this should be the `metadata.annotations` field on resources.
+ // For other implementations, this refers to any relevant (implementation specific) "annotations" concepts.
+ //
+ // An implementation may choose to add additional implementation-specific annotations as they see fit.
+ // + // Support: Extended + // + // +optional + // +kubebuilder:validation:MaxProperties=8 + // +kubebuilder:validation:XValidation:message="Annotation keys must be in the form of an optional DNS subdomain prefix followed by a required name segment of up to 63 characters.",rule="self.all(key, key.matches(r\"\"\"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$\"\"\"))" + // +kubebuilder:validation:XValidation:message="If specified, the annotation key's prefix must be a DNS subdomain not longer than 253 characters in total.",rule="self.all(key, key.split(\"/\")[0].size() < 253)" + Annotations map[AnnotationKey]AnnotationValue `json:"annotations,omitempty"` + + // ParametersRef is a reference to a resource that contains the configuration + // parameters corresponding to the Gateway. This is optional if the + // controller does not require any additional configuration. + // + // This follows the same semantics as GatewayClass's `parametersRef`, but on a per-Gateway basis + // + // The Gateway's GatewayClass may provide its own `parametersRef`. When both are specified, + // the merging behavior is implementation specific. + // It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway. + // + // If the referent cannot be found, refers to an unsupported kind, or when + // the data within that resource is malformed, the Gateway SHOULD be + // rejected with the "Accepted" status condition set to "False" and an + // "InvalidParameters" reason. + // + // Support: Implementation-specific + // + // +optional + ParametersRef *LocalParametersReference `json:"parametersRef,omitempty"` +} + +// LocalParametersReference identifies an API object containing controller-specific +// configuration resource within the namespace. +type LocalParametersReference struct { + // Group is the group of the referent. 
+ // +required + Group Group `json:"group"` + + // Kind is kind of the referent. + // +required + Kind Kind `json:"kind"` + + // Name is the name of the referent. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Name string `json:"name"` +} + +// GatewayConditionType is a type of condition associated with a +// Gateway. This type should be used with the GatewayStatus.Conditions +// field. +type GatewayConditionType string + +// GatewayConditionReason defines the set of reasons that explain why a +// particular Gateway condition type has been raised. +type GatewayConditionReason string + +const ( + // This condition indicates whether a Gateway has generated some + // configuration that is assumed to be ready soon in the underlying data + // plane. + // + // It is a positive-polarity summary condition, and so should always be + // present on the resource with ObservedGeneration set. + // + // It should be set to Unknown if the controller performs updates to the + // status before it has all the information it needs to be able to determine + // if the condition is true. + // + // Possible reasons for this condition to be True are: + // + // * "Programmed" + // + // Possible reasons for this condition to be False are: + // + // * "Invalid" + // * "Pending" + // * "NoResources" + // * "AddressNotAssigned" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionProgrammed GatewayConditionType = "Programmed" + + // This reason is used with the "Programmed" condition when the condition is + // true. + GatewayReasonProgrammed GatewayConditionReason = "Programmed" + + // This reason is used with the "Programmed" and "Accepted" conditions when + // the Gateway is syntactically or semantically invalid. 
For example, this + // could include unspecified TLS configuration, or some unrecognized or + // invalid values in the TLS configuration. + GatewayReasonInvalid GatewayConditionReason = "Invalid" + + // This reason is used with the "Programmed" condition when the + // Gateway is not scheduled because insufficient infrastructure + // resources are available. + GatewayReasonNoResources GatewayConditionReason = "NoResources" + + // This reason is used with the "Programmed" condition when the underlying + // implementation and network have yet to dynamically assign addresses for a + // Gateway. + // + // Some example situations where this reason can be used: + // + // * IPAM address exhaustion + // * Address not yet allocated + // + // When this reason is used the implementation SHOULD provide a clear + // message explaining the underlying problem, ideally with some hints as to + // what actions can be taken that might resolve the problem. + GatewayReasonAddressNotAssigned GatewayConditionReason = "AddressNotAssigned" + + // This reason is used with the "Programmed" condition when the underlying + // implementation (and possibly, network) are unable to use an address that + // was provided in the Gateway specification. + // + // Some example situations where this reason can be used: + // + // * a named address not being found + // * a provided static address can't be used + // * the address is already in use + // + // When this reason is used the implementation SHOULD provide prescriptive + // information on which address is causing the problem and how to resolve it + // in the condition message. + GatewayReasonAddressNotUsable GatewayConditionReason = "AddressNotUsable" + // This condition indicates `FrontendValidationModeType` changed from + // `AllowValidOnly` to `AllowInsecureFallback`. 
+ GatewayConditionInsecureFrontendValidationMode GatewayConditionReason = "InsecureFrontendValidationMode" + // This reason MUST be set for GatewayConditionInsecureFrontendValidationMode + // when client change FrontendValidationModeType for a Gateway or per port override + // to `AllowInsecureFallback`. + GatewayReasonConfigurationChanged GatewayConditionReason = "ConfigurationChanged" +) + +const ( + // This condition is true when the controller managing the Gateway is + // syntactically and semantically valid enough to produce some configuration + // in the underlying data plane. This does not indicate whether or not the + // configuration has been propagated to the data plane. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // * "ListenersNotValid" + // + // Possible reasons for this condition to be False are: + // + // * "Invalid" + // * "InvalidParameters" + // * "NotReconciled" + // * "UnsupportedAddress" + // * "ListenersNotValid" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionAccepted GatewayConditionType = "Accepted" + + // This reason is used with the "Accepted" condition when the condition is + // True. + GatewayReasonAccepted GatewayConditionReason = "Accepted" + + // This reason is used with the "Accepted" condition when one or + // more Listeners have an invalid or unsupported configuration + // and cannot be configured on the Gateway. + // This can be the reason when "Accepted" is "True" or "False", depending on whether + // the listener being invalid causes the entire Gateway to not be accepted. 
+ GatewayReasonListenersNotValid GatewayConditionReason = "ListenersNotValid" + + // This reason is used with the "Accepted" and "Programmed" + // conditions when the status is "Unknown" and no controller has reconciled + // the Gateway. + GatewayReasonPending GatewayConditionReason = "Pending" + + // This reason is used with the "Accepted" condition to indicate that the + // Gateway could not be accepted because an address that was provided is a + // type which is not supported by the implementation. + GatewayReasonUnsupportedAddress GatewayConditionReason = "UnsupportedAddress" + + // This reason is used with the "Accepted" condition when the + // Gateway was not accepted because the parametersRef field + // was invalid, with more detail in the message. + GatewayReasonInvalidParameters GatewayConditionReason = "InvalidParameters" +) + +const ( + // Deprecated: use "Accepted" instead. + GatewayConditionScheduled GatewayConditionType = "Scheduled" + + // This reason is used with the "Scheduled" condition when the condition is + // True. + // + // Deprecated: use the "Accepted" condition with reason "Accepted" instead. + GatewayReasonScheduled GatewayConditionReason = "Scheduled" + + // Deprecated: Use "Pending" instead. + GatewayReasonNotReconciled GatewayConditionReason = "NotReconciled" +) + +const ( + // "Ready" is a condition type reserved for future use. It should not be used by implementations. + // + // If used in the future, "Ready" will represent the final state where all configuration is confirmed good + // _and has completely propagated to the data plane_. That is, it is a _guarantee_ that, as soon as something + // sees the Condition as `true`, then connections will be correctly routed _immediately_. + // + // This is a very strong guarantee, and to date no implementation has satisfied it enough to implement it. + // This reservation can be discussed in the future if necessary. 
+ // + // Note: This condition is not really "deprecated", but rather "reserved"; however, deprecated triggers Go linters + // to alert about usage. + // Deprecated: Ready is reserved for future use + GatewayConditionReady GatewayConditionType = "Ready" + + // Deprecated: Ready is reserved for future use + GatewayReasonReady GatewayConditionReason = "Ready" + + // Deprecated: Ready is reserved for future use + GatewayReasonListenersNotReady GatewayConditionReason = "ListenersNotReady" +) + +const ( + // AttachedListenerSets is a condition that is true when the Gateway has + // at least one ListenerSet attached to it. + // + // Possible reasons for this condition to be True are: + // + // * "ListenerSetsAttached" + // + // Possible reasons for this condition to be False are: + // + // * "NoListenerSetsAttached" + // * "ListenerSetsNotAllowed" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionAttachedListenerSets GatewayConditionType = "AttachedListenerSets" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has at least one ListenerSet attached to it. + GatewayReasonListenerSetsAttached GatewayConditionReason = "ListenerSetsAttached" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has no ListenerSets attached to it. + GatewayReasonNoListenerSetsAttached GatewayConditionReason = "NoListenerSetsAttached" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has ListenerSets attached to it, but the ListenerSets are not allowed. + GatewayReasonListenerSetsNotAllowed GatewayConditionReason = "ListenerSetsNotAllowed" +) + +// ListenerStatus is the status associated with a Listener. +type ListenerStatus struct { + // Name is the name of the Listener that this status corresponds to. 
+ // +required + Name SectionName `json:"name"` + + // SupportedKinds is the list indicating the Kinds supported by this + // listener. This MUST represent the kinds an implementation supports for + // that Listener configuration. + // + // If kinds are specified in Spec that are not supported, they MUST NOT + // appear in this list and an implementation MUST set the "ResolvedRefs" + // condition to "False" with the "InvalidRouteKinds" reason. If both valid + // and invalid Route kinds are specified, the implementation MUST + // reference the valid Route kinds that have been specified. + // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + SupportedKinds []RouteGroupKind `json:"supportedKinds"` + + // AttachedRoutes represents the total number of Routes that have been + // successfully attached to this Listener. + // + // Successful attachment of a Route to a Listener is based solely on the + // combination of the AllowedRoutes field on the corresponding Listener + // and the Route's ParentRefs field. A Route is successfully attached to + // a Listener when it is selected by the Listener's AllowedRoutes field + // AND the Route has a valid ParentRef selecting the whole Gateway + // resource or a specific Listener as a parent resource (more detail on + // attachment semantics can be found in the documentation on the various + // Route kinds ParentRefs fields). Listener or Route status does not impact + // successful attachment, i.e. the AttachedRoutes field count MUST be set + // for Listeners with condition Accepted: false and MUST count successfully + // attached Routes that may themselves have Accepted: false conditions. + // + // Uses for this field include troubleshooting Route attachment and + // measuring blast radius/impact of changes to a Listener. + // +required + AttachedRoutes int32 `json:"attachedRoutes"` + + // Conditions describe the current condition of this listener. 
+ // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=8 + // +required + Conditions []metav1.Condition `json:"conditions"` +} + +// ListenerConditionType is a type of condition associated with the +// listener. This type should be used with the ListenerStatus.Conditions +// field. 
+type ListenerConditionType string + +// ListenerConditionReason defines the set of reasons that explain +// why a particular Listener condition type has been raised. +type ListenerConditionReason string + +const ( + // This condition indicates that the controller was unable to resolve + // conflicting specification requirements for this Listener. If a + // Listener is conflicted, its network port should not be configured + // on any network elements. + // + // Possible reasons for this condition to be true are: + // + // * "HostnameConflict" + // * "ProtocolConflict" + // + // Possible reasons for this condition to be False are: + // + // * "NoConflicts" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ListenerConditionConflicted ListenerConditionType = "Conflicted" + + // This reason is used with the "Conflicted" condition when + // the Listener conflicts with hostnames in other Listeners. For + // example, this reason would be used when multiple Listeners on + // the same port use `example.com` in the hostname field. + ListenerReasonHostnameConflict ListenerConditionReason = "HostnameConflict" + + // This reason is used with the "Conflicted" condition when + // multiple Listeners are specified with the same Listener port + // number, but have conflicting protocol specifications. + ListenerReasonProtocolConflict ListenerConditionReason = "ProtocolConflict" + + // This reason is used with the "Conflicted" condition when the condition + // is False. + ListenerReasonNoConflicts ListenerConditionReason = "NoConflicts" +) + +const ( + // This condition indicates that the listener is syntactically and + // semantically valid, and that all features used in the listener's spec are + // supported. + // + // In general, a Listener will be marked as Accepted when the supplied + // configuration will generate at least some data plane configuration. 
+ // + // For example, a Listener with an unsupported protocol will never generate + // any data plane config, and so will have Accepted set to `false.` + // Conversely, a Listener that does not have any Routes will be able to + // generate data plane config, and so will have Accepted set to `true`. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "PortUnavailable" + // * "UnsupportedProtocol" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ListenerConditionAccepted ListenerConditionType = "Accepted" + + // Deprecated: use "Accepted" instead. + ListenerConditionDetached ListenerConditionType = "Detached" + + // This reason is used with the "Accepted" condition when the condition is + // True. + ListenerReasonAccepted ListenerConditionReason = "Accepted" + + // This reason is used with the "Detached" condition when the condition is + // False. + // + // Deprecated: use the "Accepted" condition with reason "Accepted" instead. + ListenerReasonAttached ListenerConditionReason = "Attached" + + // This reason is used with the "Accepted" condition when the Listener + // requests a port that cannot be used on the Gateway. This reason could be + // used in a number of instances, including: + // + // * The port is already in use. + // * The port is not supported by the implementation. + ListenerReasonPortUnavailable ListenerConditionReason = "PortUnavailable" + + // This reason is used with the "Accepted" condition when the + // Listener could not be attached to be Gateway because its + // protocol type is not supported. 
+ ListenerReasonUnsupportedProtocol ListenerConditionReason = "UnsupportedProtocol" +) + +const ( + // This condition indicates whether the controller was able to + // resolve all the object references for the Listener. + // + // Possible reasons for this condition to be true are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidCertificateRef" + // * "InvalidRouteKinds" + // * "RefNotPermitted" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ListenerConditionResolvedRefs ListenerConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. + ListenerReasonResolvedRefs ListenerConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the + // Listener has a TLS configuration with at least one TLS CertificateRef + // that is invalid or does not exist. + // A CertificateRef is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. + // This reason must be used only when the reference is allowed, either by + // referencing an object in the same namespace as the Gateway, or when + // a cross-namespace reference has been explicitly allowed by a ReferenceGrant. + // If the reference is not allowed, the reason RefNotPermitted must be used + // instead. + ListenerReasonInvalidCertificateRef ListenerConditionReason = "InvalidCertificateRef" + + // This reason is used with the "ResolvedRefs" condition when an invalid or + // unsupported Route kind is specified by the Listener. 
+ ListenerReasonInvalidRouteKinds ListenerConditionReason = "InvalidRouteKinds" + + // This reason is used with the "ResolvedRefs" condition when the + // Listener has a TLS configuration that references an object in another + // namespace, where the object in the other namespace does not have a + // ReferenceGrant explicitly allowing the reference. + ListenerReasonRefNotPermitted ListenerConditionReason = "RefNotPermitted" +) + +const ( + // This condition indicates whether a Listener has generated some + // configuration that will soon be ready in the underlying data plane. + // + // It is a positive-polarity summary condition, and so should always be + // present on the resource with ObservedGeneration set. + // + // It should be set to Unknown if the controller performs updates to the + // status before it has all the information it needs to be able to determine + // if the condition is true. + // + // Possible reasons for this condition to be True are: + // + // * "Programmed" + // + // Possible reasons for this condition to be False are: + // + // * "Invalid" + // * "Pending" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ListenerConditionProgrammed ListenerConditionType = "Programmed" + + // This reason is used with the "Programmed" condition when the condition is + // true. + ListenerReasonProgrammed ListenerConditionReason = "Programmed" + + // This reason is used with the "Ready" and "Programmed" conditions when the + // Listener is syntactically or semantically invalid. + ListenerReasonInvalid ListenerConditionReason = "Invalid" + + // This reason is used with the "Accepted", "Ready" and "Programmed" + // conditions when the Listener is either not yet reconciled or not yet not + // online and ready to accept client traffic. 
+ ListenerReasonPending ListenerConditionReason = "Pending" +) + +const ( + // This condition indicates that TLS configuration within this Listener + // conflicts with TLS configuration in another Listener on the same port. + // This could happen for two reasons: + // + // 1) Overlapping Hostnames: Listener A matches *.example.com while Listener + // B matches foo.example.com. + // B) Overlapping Certificates: Listener A contains a certificate with a + // SAN for *.example.com, while Listener B contains a certificate with a + // SAN for foo.example.com. + // + // This overlapping TLS configuration can be particularly problematic when + // combined with HTTP connection coalescing. When clients reuse connections + // using this technique, it can have confusing interactions with Gateway + // API, such as TLS configuration for one Listener getting used for a + // request reusing an existing connection that would not be used if the same + // request was initiating a new connection. + // + // Controllers MUST detect the presence of overlapping hostnames and MAY + // detect the presence of overlapping certificates. + // + // This condition MUST be set on all Listeners with overlapping TLS config. + // For example, consider the following listener - hostname mapping: + // + // A: foo.example.com + // B: foo.example.org + // C: *.example.com + // + // In the above example, Listeners A and C would have overlapping hostnames + // and therefore this condition should be set for Listeners A and C, but not + // B. + // + // Possible reasons for this condition to be True are: + // + // * "OverlappingHostnames" + // * "OverlappingCertificates" + // + // If a controller supports checking for both possible reasons and finds + // that both are true, it SHOULD set the "OverlappingCertificates" Reason. + // + // This is a negative polarity condition and MUST NOT be set when it is + // False. 
+ // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. + ListenerConditionOverlappingTLSConfig ListenerConditionType = "OverlappingTLSConfig" + + // This reason is used with the "OverlappingTLSConfig" condition when the + // condition is true. + ListenerReasonOverlappingHostnames ListenerConditionReason = "OverlappingHostnames" + + // This reason is used with the "OverlappingTLSConfig" condition when the + // condition is true. + ListenerReasonOverlappingCertificates ListenerConditionReason = "OverlappingCertificates" +) + +const ( + // "Ready" is a condition type reserved for future use. It should not be used by implementations. + // Note: This condition is not really "deprecated", but rather "reserved"; however, deprecated triggers Go linters + // to alert about usage. + // + // If used in the future, "Ready" will represent the final state where all configuration is confirmed good + // _and has completely propagated to the data plane_. That is, it is a _guarantee_ that, as soon as something + // sees the Condition as `true`, then connections will be correctly routed _immediately_. + // + // This is a very strong guarantee, and to date no implementation has satisfied it enough to implement it. + // This reservation can be discussed in the future if necessary. + // + // Deprecated: Ready is reserved for future use + ListenerConditionReady ListenerConditionType = "Ready" + + // Deprecated: Ready is reserved for future use + ListenerReasonReady ListenerConditionReason = "Ready" +) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go new file mode 100644 index 00000000..972d3504 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go @@ -0,0 +1,327 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api,scope=Cluster,shortName=gc +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Controller",type=string,JSONPath=`.spec.controllerName` +// +kubebuilder:printcolumn:name="Accepted",type=string,JSONPath=`.status.conditions[?(@.type=="Accepted")].status` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Description",type=string,JSONPath=`.spec.description`,priority=1 + +// GatewayClass describes a class of Gateways available to the user for creating +// Gateway resources. +// +// It is recommended that this resource be used as a template for Gateways. This +// means that a Gateway is based on the state of the GatewayClass at the time it +// was created and changes to the GatewayClass or associated parameters are not +// propagated down to existing Gateways. This recommendation is intended to +// limit the blast radius of changes to GatewayClass or associated parameters. +// If implementations choose to propagate GatewayClass changes to existing +// Gateways, that MUST be clearly documented by the implementation. 
+// +// Whenever one or more Gateways are using a GatewayClass, implementations SHOULD +// add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the +// associated GatewayClass. This ensures that a GatewayClass associated with a +// Gateway is not deleted while in use. +// +// GatewayClass is a Cluster level resource. +type GatewayClass struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of GatewayClass. + // +required + Spec GatewayClassSpec `json:"spec"` + + // Status defines the current state of GatewayClass. + // + // Implementations MUST populate status on all GatewayClass resources which + // specify their controller name. + // + // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Pending", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +optional + Status GatewayClassStatus `json:"status,omitempty"` +} + +const ( + // GatewayClassFinalizerGatewaysExist should be added as a finalizer to the + // GatewayClass whenever there are provisioned Gateways using a + // GatewayClass. + GatewayClassFinalizerGatewaysExist = "gateway-exists-finalizer.gateway.networking.k8s.io" +) + +// GatewayClassSpec reflects the configuration of a class of Gateways. +type GatewayClassSpec struct { + // ControllerName is the name of the controller that is managing Gateways of + // this class. The value of this field MUST be a domain prefixed path. + // + // Example: "example.net/gateway-controller". + // + // This field is not mutable and cannot be empty. + // + // Support: Core + // + // +kubebuilder:validation:XValidation:message="Value is immutable",rule="self == oldSelf" + // +required + ControllerName GatewayController `json:"controllerName"` + + // ParametersRef is a reference to a resource that contains the configuration + // parameters corresponding to the GatewayClass. 
This is optional if the + // controller does not require any additional configuration. + // + // ParametersRef can reference a standard Kubernetes resource, i.e. ConfigMap, + // or an implementation-specific custom resource. The resource can be + // cluster-scoped or namespace-scoped. + // + // If the referent cannot be found, refers to an unsupported kind, or when + // the data within that resource is malformed, the GatewayClass SHOULD be + // rejected with the "Accepted" status condition set to "False" and an + // "InvalidParameters" reason. + // + // A Gateway for this GatewayClass may provide its own `parametersRef`. When both are specified, + // the merging behavior is implementation specific. + // It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway. + // + // Support: Implementation-specific + // + // +optional + ParametersRef *ParametersReference `json:"parametersRef,omitempty"` + + // Description helps describe a GatewayClass with more details. + // + // +kubebuilder:validation:MaxLength=64 + // +optional + Description *string `json:"description,omitempty"` +} + +// ParametersReference identifies an API object containing controller-specific +// configuration resource within the cluster. +type ParametersReference struct { + // Group is the group of the referent. + // +required + Group Group `json:"group"` + + // Kind is kind of the referent. + // +required + Kind Kind `json:"kind"` + + // Name is the name of the referent. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Name string `json:"name"` + + // Namespace is the namespace of the referent. + // This field is required when referring to a Namespace-scoped resource and + // MUST be unset when referring to a Cluster-scoped resource. + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` +} + +// GatewayClassConditionType is the type for status conditions on +// Gateway resources. 
This type should be used with the +// GatewayClassStatus.Conditions field. +type GatewayClassConditionType string + +// GatewayClassConditionReason defines the set of reasons that explain why a +// particular GatewayClass condition type has been raised. +type GatewayClassConditionReason string + +const ( + // This condition indicates whether the GatewayClass has been accepted by + // the controller requested in the `spec.controller` field. + // + // This condition defaults to Unknown, and MUST be set by a controller when + // it sees a GatewayClass using its controller string. The status of this + // condition MUST be set to True if the controller will support provisioning + // Gateways using this class. Otherwise, this status MUST be set to False. + // If the status is set to False, the controller SHOULD set a Message and + // Reason as an explanation. + // + // Possible reasons for this condition to be true are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidParameters" + // * "Unsupported" + // * "UnsupportedVersion" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers should prefer to use the values of GatewayClassConditionReason + // for the corresponding Reason, where appropriate. + GatewayClassConditionStatusAccepted GatewayClassConditionType = "Accepted" + + // This reason is used with the "Accepted" condition when the condition is + // true. + GatewayClassReasonAccepted GatewayClassConditionReason = "Accepted" + + // This reason is used with the "Accepted" condition when the GatewayClass + // was not accepted because the parametersRef field refers to + // * a namespaced resource but the Namespace field is not set, or + // * a cluster-scoped resource but the Namespace field is set, or + // * a nonexistent object, or + // * an unsupported resource or kind, or + // * an existing resource but the data within that resource is malformed. 
+ GatewayClassReasonInvalidParameters GatewayClassConditionReason = "InvalidParameters" + + // This reason is used with the "Accepted" condition when the + // requested controller has not yet made a decision about whether + // to admit the GatewayClass. It is the default Reason on a new + // GatewayClass. + GatewayClassReasonPending GatewayClassConditionReason = "Pending" + + // This reason is used with the "Accepted" condition when the GatewayClass + // was not accepted because the implementation does not support a + // user-defined GatewayClass. + GatewayClassReasonUnsupported GatewayClassConditionReason = "Unsupported" + + // Deprecated: Use "Pending" instead. + GatewayClassReasonWaiting GatewayClassConditionReason = "Waiting" +) + +const ( + // This condition indicates whether the GatewayClass supports the version(s) + // of Gateway API CRDs present in the cluster. This condition MUST be set by + // a controller when it marks a GatewayClass "Accepted". + // + // The version of a Gateway API CRD is defined by the + // gateway.networking.k8s.io/bundle-version annotation on the CRD. If + // implementations detect any Gateway API CRDs that either do not have this + // annotation set, or have it set to a version that is not recognized or + // supported by the implementation, this condition MUST be set to false. + // + // Implementations MAY choose to either provide "best effort" support when + // an unrecognized CRD version is present. This would be communicated by + // setting the "Accepted" condition to true and the "SupportedVersion" + // condition to false. + // + // Alternatively, implementations MAY choose not to support CRDs with + // unrecognized versions. This would be communicated by setting the + // "Accepted" condition to false with the reason "UnsupportedVersions". 
+ // + // Possible reasons for this condition to be true are: + // + // * "SupportedVersion" + // + // Possible reasons for this condition to be False are: + // + // * "UnsupportedVersion" + // + // Controllers should prefer to use the values of GatewayClassConditionReason + // for the corresponding Reason, where appropriate. + // + // + GatewayClassConditionStatusSupportedVersion GatewayClassConditionType = "SupportedVersion" + + // This reason is used with the "SupportedVersion" condition when the + // condition is true. + GatewayClassReasonSupportedVersion GatewayClassConditionReason = "SupportedVersion" + + // This reason is used with the "SupportedVersion" or "Accepted" condition + // when the condition is false. A message SHOULD be included in this + // condition that includes the detected CRD version(s) present in the + // cluster and the CRD version(s) that are supported by the GatewayClass. + GatewayClassReasonUnsupportedVersion GatewayClassConditionReason = "UnsupportedVersion" +) + +// GatewayClassStatus is the current status for the GatewayClass. +type GatewayClassStatus struct { + // Conditions is the current status from the controller for + // this GatewayClass. + // + // Controllers should prefer to publish conditions using values + // of GatewayClassConditionType for the type of each Condition. + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. 
+ // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:default={{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Pending", lastTransitionTime: "1970-01-01T00:00:00Z"}} + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // SupportedFeatures is the set of features the GatewayClass support. + // It MUST be sorted in ascending alphabetical order by the Name key. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=64 + SupportedFeatures []SupportedFeature `json:"supportedFeatures,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayClassList contains a list of GatewayClass +type GatewayClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GatewayClass `json:"items"` +} + +// FeatureName is used to describe distinct features that are covered by +// conformance tests. 
+type FeatureName string + +type SupportedFeature struct { + // +required + Name FeatureName `json:"name"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go new file mode 100644 index 00000000..8d768fde --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go @@ -0,0 +1,60 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "errors" +) + +// Below code handles the experimental field breaking change introduced in +// https://github.com/kubernetes-sigs/gateway-api/pull/3200/. +// We are overriding the UnmarshalJSON function to be able to handle cases where +// users had the old version of the GatewayClass CRD applied with SupportedFeatures +// as a list of strings and not list of objects. +// See https://github.com/kubernetes-sigs/gateway-api/issues/3464 +// for more information. 
+ +func (s *SupportedFeature) UnmarshalJSON(data []byte) error { + var oldSupportedFeature oldSupportedFeature + var unmarshalTypeErr *json.UnmarshalTypeError + if err := json.Unmarshal(data, &oldSupportedFeature); err == nil { + s.Name = FeatureName(oldSupportedFeature) + return nil + } else if !errors.As(err, &unmarshalTypeErr) { + // If the error is not a type error, return it + return err + } + + var si supportedFeatureInternal + if err := json.Unmarshal(data, &si); err != nil { + return err + } + s.Name = si.Name + return nil +} + +// This is solely for the purpose of ensuring backward compatibility and +// SHOULD NOT be used elsewhere. +type supportedFeatureInternal struct { + // +required + Name FeatureName `json:"name"` +} + +// This is solely for the purpose of ensuring backward compatibility and +// SHOULD NOT be used elsewhere. +type oldSupportedFeature string diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go new file mode 100644 index 00000000..5f9bde7a --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go @@ -0,0 +1,649 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Hostnames",type=string,JSONPath=`.spec.hostnames` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// GRPCRoute provides a way to route gRPC requests. This includes the capability +// to match requests by hostname, gRPC service, gRPC method, or HTTP/2 header. +// Filters can be used to specify additional processing steps. Backends specify +// where matching requests will be routed. +// +// GRPCRoute falls under extended support within the Gateway API. Within the +// following specification, the word "MUST" indicates that an implementation +// supporting GRPCRoute must conform to the indicated requirement, but an +// implementation not supporting this route type need not follow the requirement +// unless explicitly indicated. +// +// Implementations supporting `GRPCRoute` with the `HTTPS` `ProtocolType` MUST +// accept HTTP/2 connections without an initial upgrade from HTTP/1.1, i.e. via +// ALPN. If the implementation does not support this, then it MUST set the +// "Accepted" condition to "False" for the affected listener with a reason of +// "UnsupportedProtocol". Implementations MAY also accept HTTP/2 connections +// with an upgrade from HTTP/1. +// +// Implementations supporting `GRPCRoute` with the `HTTP` `ProtocolType` MUST +// support HTTP/2 over cleartext TCP (h2c, +// https://www.rfc-editor.org/rfc/rfc7540#section-3.1) without an initial +// upgrade from HTTP/1.1, i.e. with prior knowledge +// (https://www.rfc-editor.org/rfc/rfc7540#section-3.4). If the implementation +// does not support this, then it MUST set the "Accepted" condition to "False" +// for the affected listener with a reason of "UnsupportedProtocol". 
+// Implementations MAY also accept HTTP/2 connections with an upgrade from +// HTTP/1, i.e. without prior knowledge. +type GRPCRoute struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of GRPCRoute. + // +required + Spec GRPCRouteSpec `json:"spec,omitempty"` + + // Status defines the current state of GRPCRoute. + // +optional + Status GRPCRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GRPCRouteList contains a list of GRPCRoute. +type GRPCRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GRPCRoute `json:"items"` +} + +// GRPCRouteStatus defines the observed state of GRPCRoute. +type GRPCRouteStatus struct { + RouteStatus `json:",inline"` +} + +// GRPCRouteSpec defines the desired state of GRPCRoute +type GRPCRouteSpec struct { + CommonRouteSpec `json:",inline"` + + // Hostnames defines a set of hostnames to match against the GRPC + // Host header to select a GRPCRoute to process the request. This matches + // the RFC 1123 definition of a hostname with 2 notable exceptions: + // + // 1. IPs are not allowed. + // 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard + // label MUST appear by itself as the first label. + // + // If a hostname is specified by both the Listener and GRPCRoute, there + // MUST be at least one intersecting hostname for the GRPCRoute to be + // attached to the Listener. For example: + // + // * A Listener with `test.example.com` as the hostname matches GRPCRoutes + // that have either not specified any hostnames, or have specified at + // least one of `test.example.com` or `*.example.com`. + // * A Listener with `*.example.com` as the hostname matches GRPCRoutes + // that have either not specified any hostnames or have specified at least + // one hostname that matches the Listener hostname. 
For example, + // `test.example.com` and `*.example.com` would both match. On the other + // hand, `example.com` and `test.example.net` would not match. + // + // Hostnames that are prefixed with a wildcard label (`*.`) are interpreted + // as a suffix match. That means that a match for `*.example.com` would match + // both `test.example.com`, and `foo.test.example.com`, but not `example.com`. + // + // If both the Listener and GRPCRoute have specified hostnames, any + // GRPCRoute hostnames that do not match the Listener hostname MUST be + // ignored. For example, if a Listener specified `*.example.com`, and the + // GRPCRoute specified `test.example.com` and `test.example.net`, + // `test.example.net` MUST NOT be considered for a match. + // + // If both the Listener and GRPCRoute have specified hostnames, and none + // match with the criteria above, then the GRPCRoute MUST NOT be accepted by + // the implementation. The implementation MUST raise an 'Accepted' Condition + // with a status of `False` in the corresponding RouteParentStatus. + // + // If a Route (A) of type HTTPRoute or GRPCRoute is attached to a + // Listener and that listener already has another Route (B) of the other + // type attached and the intersection of the hostnames of A and B is + // non-empty, then the implementation MUST accept exactly one of these two + // routes, determined by the following criteria, in order: + // + // * The oldest Route based on creation timestamp. + // * The Route appearing first in alphabetical order by + // "{namespace}/{name}". + // + // The rejected Route MUST raise an 'Accepted' condition with a status of + // 'False' in the corresponding RouteParentStatus. + // + // Support: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + Hostnames []Hostname `json:"hostnames,omitempty"` + + // Rules are a list of GRPC matchers, filters and actions. 
+ // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="While 16 rules and 64 matches per rule are allowed, the total number of matches across all rules in a route must be less than 128",rule="(self.size() > 0 ? (has(self[0].matches) ? self[0].matches.size() : 0) : 0) + (self.size() > 1 ? (has(self[1].matches) ? self[1].matches.size() : 0) : 0) + (self.size() > 2 ? (has(self[2].matches) ? self[2].matches.size() : 0) : 0) + (self.size() > 3 ? (has(self[3].matches) ? self[3].matches.size() : 0) : 0) + (self.size() > 4 ? (has(self[4].matches) ? self[4].matches.size() : 0) : 0) + (self.size() > 5 ? (has(self[5].matches) ? self[5].matches.size() : 0) : 0) + (self.size() > 6 ? (has(self[6].matches) ? self[6].matches.size() : 0) : 0) + (self.size() > 7 ? (has(self[7].matches) ? self[7].matches.size() : 0) : 0) + (self.size() > 8 ? (has(self[8].matches) ? self[8].matches.size() : 0) : 0) + (self.size() > 9 ? (has(self[9].matches) ? self[9].matches.size() : 0) : 0) + (self.size() > 10 ? (has(self[10].matches) ? self[10].matches.size() : 0) : 0) + (self.size() > 11 ? (has(self[11].matches) ? self[11].matches.size() : 0) : 0) + (self.size() > 12 ? (has(self[12].matches) ? self[12].matches.size() : 0) : 0) + (self.size() > 13 ? (has(self[13].matches) ? self[13].matches.size() : 0) : 0) + (self.size() > 14 ? (has(self[14].matches) ? self[14].matches.size() : 0) : 0) + (self.size() > 15 ? (has(self[15].matches) ? self[15].matches.size() : 0) : 0) <= 128" + // + Rules []GRPCRouteRule `json:"rules,omitempty"` +} + +// GRPCRouteRule defines the semantics for matching a gRPC request based on +// conditions (matches), processing it (filters), and forwarding the request to +// an API object (backendRefs). +type GRPCRouteRule struct { + // Name is the name of the route rule. This name MUST be unique within a Route if it is set. 
+ // + // Support: Extended + // +optional + Name *SectionName `json:"name,omitempty"` + + // Matches define conditions used for matching the rule against incoming + // gRPC requests. Each match is independent, i.e. this rule will be matched + // if **any** one of the matches is satisfied. + // + // For example, take the following matches configuration: + // + // ``` + // matches: + // - method: + // service: foo.bar + // headers: + // values: + // version: 2 + // - method: + // service: foo.bar.v2 + // ``` + // + // For a request to match against this rule, it MUST satisfy + // EITHER of the two conditions: + // + // - service of foo.bar AND contains the header `version: 2` + // - service of foo.bar.v2 + // + // See the documentation for GRPCRouteMatch on how to specify multiple + // match conditions to be ANDed together. + // + // If no matches are specified, the implementation MUST match every gRPC request. + // + // Proxy or Load Balancer routing configuration generated from GRPCRoutes + // MUST prioritize rules based on the following criteria, continuing on + // ties. Merging MUST not be done between GRPCRoutes and HTTPRoutes. + // Precedence MUST be given to the rule with the largest number of: + // + // * Characters in a matching non-wildcard hostname. + // * Characters in a matching hostname. + // * Characters in a matching service. + // * Characters in a matching method. + // * Header matches. + // + // If ties still exist across multiple Routes, matching precedence MUST be + // determined in order of the following criteria, continuing on ties: + // + // * The oldest Route based on creation timestamp. + // * The Route appearing first in alphabetical order by + // "{namespace}/{name}". + // + // If ties still exist within the Route that has been given precedence, + // matching precedence MUST be granted to the first matching rule meeting + // the above criteria. 
+ // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 + Matches []GRPCRouteMatch `json:"matches,omitempty"` + + // Filters define the filters that are applied to requests that match + // this rule. + // + // The effects of ordering of multiple behaviors are currently unspecified. + // This can change in the future based on feedback during the alpha stage. + // + // Conformance-levels at this level are defined based on the type of filter: + // + // - ALL core filters MUST be supported by all implementations that support + // GRPCRoute. + // - Implementers are encouraged to support extended filters. + // - Implementation-specific custom filters have no API guarantees across + // implementations. + // + // Specifying the same filter multiple times is not supported unless explicitly + // indicated in the filter. + // + // If an implementation cannot support a combination of filters, it must clearly + // document that limitation. In cases where incompatible or unsupported + // filters are specified and cause the `Accepted` condition to be set to status + // `False`, implementations may use the `IncompatibleFilters` reason to specify + // this configuration error. + // + // Support: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" + // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" + Filters []GRPCRouteFilter `json:"filters,omitempty"` + + // BackendRefs defines the backend(s) where matching requests should be + // sent. + // + // Failure behavior here depends on how many BackendRefs are specified and + // how many are invalid. 
+ // + // If *all* entries in BackendRefs are invalid, and there are also no filters + // specified in this route rule, *all* traffic which matches this rule MUST + // receive an `UNAVAILABLE` status. + // + // See the GRPCBackendRef definition for the rules about what makes a single + // GRPCBackendRef invalid. + // + // When a GRPCBackendRef is invalid, `UNAVAILABLE` statuses MUST be returned for + // requests that would have otherwise been routed to an invalid backend. If + // multiple backends are specified, and some are invalid, the proportion of + // requests that would otherwise have been routed to an invalid backend + // MUST receive an `UNAVAILABLE` status. + // + // For example, if two backends are specified with equal weights, and one is + // invalid, 50 percent of traffic MUST receive an `UNAVAILABLE` status. + // Implementations may choose how that 50 percent is determined. + // + // Support: Core for Kubernetes Service + // + // Support: Implementation-specific for any other resource + // + // Support for weight: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + BackendRefs []GRPCBackendRef `json:"backendRefs,omitempty"` + + // SessionPersistence defines and configures session persistence + // for the route rule. + // + // Support: Extended + // + // +optional + // + SessionPersistence *SessionPersistence `json:"sessionPersistence,omitempty"` +} + +// GRPCRouteMatch defines the predicate used to match requests to a given +// action. Multiple match types are ANDed together, i.e. the match will +// evaluate to true only if all conditions are satisfied. +// +// For example, the match below will match a gRPC request only if its service +// is `foo` AND it contains the `version: v1` header: +// +// ``` +// matches: +// - method: +// type: Exact +// service: "foo" +// headers: +// - name: "version" +// value "v1" +// +// ``` +type GRPCRouteMatch struct { + // Method specifies a gRPC request service/method matcher. 
If this field is + // not specified, all services and methods will match. + // + // +optional + Method *GRPCMethodMatch `json:"method,omitempty"` + + // Headers specifies gRPC request header matchers. Multiple match values are + // ANDed together, meaning, a request MUST match all the specified headers + // to select the route. + // + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=16 + Headers []GRPCHeaderMatch `json:"headers,omitempty"` +} + +// GRPCMethodMatch describes how to select a gRPC route by matching the gRPC +// request service and/or method. +// +// At least one of Service and Method MUST be a non-empty string. +// +// +kubebuilder:validation:XValidation:message="One or both of 'service' or 'method' must be specified",rule="has(self.type) ? has(self.service) || has(self.method) : true" +// +kubebuilder:validation:XValidation:message="service must only contain valid characters (matching ^(?i)\\.?[a-z_][a-z_0-9]*(\\.[a-z_][a-z_0-9]*)*$)",rule="(!has(self.type) || self.type == 'Exact') && has(self.service) ? self.service.matches(r\"\"\"^(?i)\\.?[a-z_][a-z_0-9]*(\\.[a-z_][a-z_0-9]*)*$\"\"\"): true" +// +kubebuilder:validation:XValidation:message="method must only contain valid characters (matching ^[A-Za-z_][A-Za-z_0-9]*$)",rule="(!has(self.type) || self.type == 'Exact') && has(self.method) ? self.method.matches(r\"\"\"^[A-Za-z_][A-Za-z_0-9]*$\"\"\"): true" +type GRPCMethodMatch struct { + // Type specifies how to match against the service and/or method. + // Support: Core (Exact with service and method specified) + // + // Support: Implementation-specific (Exact with method specified but no service specified) + // + // Support: Implementation-specific (RegularExpression) + // + // +optional + // +kubebuilder:default=Exact + Type *GRPCMethodMatchType `json:"type,omitempty"` + + // Value of the service to match against. If left empty or omitted, will + // match any service. 
+ // + // At least one of Service and Method MUST be a non-empty string. + // + // +optional + // +kubebuilder:validation:MaxLength=1024 + Service *string `json:"service,omitempty"` + + // Value of the method to match against. If left empty or omitted, will + // match all services. + // + // At least one of Service and Method MUST be a non-empty string. + // + // +optional + // +kubebuilder:validation:MaxLength=1024 + Method *string `json:"method,omitempty"` +} + +// MethodMatchType specifies the semantics of how gRPC methods and services are compared. +// Valid MethodMatchType values, along with their conformance levels, are: +// +// * "Exact" - Core +// * "RegularExpression" - Implementation Specific +// +// Exact methods MUST be syntactically valid: +// +// - Must not contain `/` character +// +// +kubebuilder:validation:Enum=Exact;RegularExpression +type GRPCMethodMatchType string + +const ( + // Matches the method or service exactly and with case sensitivity. + GRPCMethodMatchExact GRPCMethodMatchType = "Exact" + + // Matches if the method or service matches the given regular expression with + // case sensitivity. + // + // Since `"RegularExpression"` has implementation-specific conformance, + // implementations can support POSIX, PCRE, RE2 or any other regular expression + // dialect. + // Please read the implementation's documentation to determine the supported + // dialect. + GRPCMethodMatchRegularExpression GRPCMethodMatchType = "RegularExpression" +) + +// GRPCHeaderMatch describes how to select a gRPC route by matching gRPC request +// headers. +type GRPCHeaderMatch struct { + // Type specifies how to match against the value of the header. + // + // +optional + // +kubebuilder:default=Exact + Type *GRPCHeaderMatchType `json:"type,omitempty"` + + // Name is the name of the gRPC Header to be matched. + // + // If multiple entries specify equivalent header names, only the first + // entry with an equivalent name MUST be considered for a match. 
Subsequent + // entries with an equivalent header name MUST be ignored. Due to the + // case-insensitivity of header names, "foo" and "Foo" are considered + // equivalent. + // +required + Name GRPCHeaderName `json:"name"` + + // Value is the value of the gRPC Header to be matched. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + // +required + Value string `json:"value"` +} + +// GRPCHeaderMatchType specifies the semantics of how GRPC header values should +// be compared. Valid GRPCHeaderMatchType values, along with their conformance +// levels, are: +// +// * "Exact" - Core +// * "RegularExpression" - Implementation Specific +// +// Note that new values may be added to this enum in future releases of the API, +// implementations MUST ensure that unknown values will not cause a crash. +// +// Unknown values here MUST result in the implementation setting the Accepted +// Condition for the Route to `status: False`, with a Reason of +// `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;RegularExpression +type GRPCHeaderMatchType string + +// GRPCHeaderMatchType constants. +const ( + GRPCHeaderMatchExact GRPCHeaderMatchType = "Exact" + GRPCHeaderMatchRegularExpression GRPCHeaderMatchType = "RegularExpression" +) + +type GRPCHeaderName HeaderName + +// GRPCRouteFilterType identifies a type of GRPCRoute filter. +type GRPCRouteFilterType string + +const ( + // GRPCRouteFilterRequestHeaderModifier can be used to add or remove a gRPC + // header from a gRPC request before it is sent to the upstream target. + // + // Support in GRPCRouteRule: Core + // + // Support in GRPCBackendRef: Extended + GRPCRouteFilterRequestHeaderModifier GRPCRouteFilterType = "RequestHeaderModifier" + + // GRPCRouteFilterRequestHeaderModifier can be used to add or remove a gRPC + // header from a gRPC response before it is sent to the client. 
+ // + // Support in GRPCRouteRule: Core + // + // Support in GRPCBackendRef: Extended + GRPCRouteFilterResponseHeaderModifier GRPCRouteFilterType = "ResponseHeaderModifier" + + // GRPCRouteFilterRequestMirror can be used to mirror gRPC requests to a + // different backend. The responses from this backend MUST be ignored by + // the Gateway. + // + // Support in GRPCRouteRule: Extended + // + // Support in GRPCBackendRef: Extended + GRPCRouteFilterRequestMirror GRPCRouteFilterType = "RequestMirror" + + // GRPCRouteFilterExtensionRef should be used for configuring custom + // gRPC filters. + // + // Support in GRPCRouteRule: Implementation-specific + // + // Support in GRPCBackendRef: Implementation-specific + GRPCRouteFilterExtensionRef GRPCRouteFilterType = "ExtensionRef" +) + +// GRPCRouteFilter defines processing steps that must be completed during the +// request or response lifecycle. GRPCRouteFilters are meant as an extension +// point to express processing that may be done in Gateway implementations. Some +// examples include request or response modification, implementing +// authentication strategies, rate-limiting, and traffic shaping. API +// guarantee/conformance is defined based on the type of the filter. 
+// +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be nil if the filter.type is not RequestHeaderModifier",rule="!(has(self.requestHeaderModifier) && self.type != 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be specified for RequestHeaderModifier filter.type",rule="!(!has(self.requestHeaderModifier) && self.type == 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be nil if the filter.type is not ResponseHeaderModifier",rule="!(has(self.responseHeaderModifier) && self.type != 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be specified for ResponseHeaderModifier filter.type",rule="!(!has(self.responseHeaderModifier) && self.type == 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be nil if the filter.type is not RequestMirror",rule="!(has(self.requestMirror) && self.type != 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be specified for RequestMirror filter.type",rule="!(!has(self.requestMirror) && self.type == 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be nil if the filter.type is not ExtensionRef",rule="!(has(self.extensionRef) && self.type != 'ExtensionRef')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be specified for ExtensionRef filter.type",rule="!(!has(self.extensionRef) && self.type == 'ExtensionRef')" +type GRPCRouteFilter struct { + // Type identifies the type of filter to apply. As with other API fields, + // types are classified into three conformance levels: + // + // - Core: Filter types and their corresponding configuration defined by + // "Support: Core" in this package, e.g. "RequestHeaderModifier". 
All + // implementations supporting GRPCRoute MUST support core filters. + // + // - Extended: Filter types and their corresponding configuration defined by + // "Support: Extended" in this package, e.g. "RequestMirror". Implementers + // are encouraged to support extended filters. + // + // - Implementation-specific: Filters that are defined and supported by specific vendors. + // In the future, filters showing convergence in behavior across multiple + // implementations will be considered for inclusion in extended or core + // conformance levels. Filter-specific configuration for such filters + // is specified using the ExtensionRef field. `Type` MUST be set to + // "ExtensionRef" for custom filters. + // + // Implementers are encouraged to define custom implementation types to + // extend the core API with implementation-specific behavior. + // + // If a reference to a custom filter type cannot be resolved, the filter + // MUST NOT be skipped. Instead, requests that would have been processed by + // that filter MUST receive a HTTP error response. + // + // +unionDiscriminator + // +kubebuilder:validation:Enum=ResponseHeaderModifier;RequestHeaderModifier;RequestMirror;ExtensionRef + // + // +required + Type GRPCRouteFilterType `json:"type"` + + // RequestHeaderModifier defines a schema for a filter that modifies request + // headers. + // + // Support: Core + // + // +optional + RequestHeaderModifier *HTTPHeaderFilter `json:"requestHeaderModifier,omitempty"` + + // ResponseHeaderModifier defines a schema for a filter that modifies response + // headers. + // + // Support: Extended + // + // +optional + ResponseHeaderModifier *HTTPHeaderFilter `json:"responseHeaderModifier,omitempty"` + + // RequestMirror defines a schema for a filter that mirrors requests. + // Requests are sent to the specified destination, but responses from + // that destination are ignored. + // + // This filter can be used multiple times within the same rule. 
Note that + // not all implementations will be able to support mirroring to multiple + // backends. + // + // Support: Extended + // + // +optional + // + // +kubebuilder:validation:XValidation:message="Only one of percent or fraction may be specified in HTTPRequestMirrorFilter",rule="!(has(self.percent) && has(self.fraction))" + RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"` + + // ExtensionRef is an optional, implementation-specific extension to the + // "filter" behavior. For example, resource "myroutefilter" in group + // "networking.example.net"). ExtensionRef MUST NOT be used for core and + // extended filters. + // + // Support: Implementation-specific + // + // This filter can be used multiple times within the same rule. + // +optional + ExtensionRef *LocalObjectReference `json:"extensionRef,omitempty"` +} + +// GRPCBackendRef defines how a GRPCRoute forwards a gRPC request. +// +// Note that when a namespace different than the local namespace is specified, a +// ReferenceGrant object is required in the referent namespace to allow that +// namespace's owner to accept the reference. See the ReferenceGrant +// documentation for details. +// +// +// +// When the BackendRef points to a Kubernetes Service, implementations SHOULD +// honor the appProtocol field if it is set for the target Service Port. +// +// Implementations supporting appProtocol SHOULD recognize the Kubernetes +// Standard Application Protocols defined in KEP-3726. +// +// If a Service appProtocol isn't specified, an implementation MAY infer the +// backend protocol through its own means. Implementations MAY infer the +// protocol from the Route type referring to the backend Service. +// +// If a Route is not able to send traffic to the backend using the specified +// protocol then the backend is considered invalid. Implementations MUST set the +// "ResolvedRefs" condition to "False" with the "UnsupportedProtocol" reason. 
+// +// +type GRPCBackendRef struct { + // BackendRef is a reference to a backend to forward matched requests to. + // + // A BackendRef can be invalid for the following reasons. In all cases, the + // implementation MUST ensure the `ResolvedRefs` Condition on the Route + // is set to `status: False`, with a Reason and Message that indicate + // what is the cause of the error. + // + // A BackendRef is invalid if: + // + // * It refers to an unknown or unsupported kind of resource. In this + // case, the Reason MUST be set to `InvalidKind` and Message of the + // Condition MUST explain which kind of resource is unknown or unsupported. + // + // * It refers to a resource that does not exist. In this case, the Reason MUST + // be set to `BackendNotFound` and the Message of the Condition MUST explain + // which resource does not exist. + // + // * It refers a resource in another namespace when the reference has not been + // explicitly allowed by a ReferenceGrant (or equivalent concept). In this + // case, the Reason MUST be set to `RefNotPermitted` and the Message of the + // Condition MUST explain which cross-namespace reference is not allowed. + // + // Support: Core for Kubernetes Service + // + // Support: Extended for Kubernetes ServiceImport + // + // Support: Implementation-specific for any other resource + // + // Support for weight: Core + // + // +optional + BackendRef `json:",inline"` + + // Filters defined at this level MUST be executed if and only if the + // request is being forwarded to the backend defined here. + // + // Support: Implementation-specific (For broader support of filters, use the + // Filters field in GRPCRouteRule.) 
+ // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" + // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" + Filters []GRPCRouteFilter `json:"filters,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go new file mode 100644 index 00000000..3d89af0d --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go @@ -0,0 +1,1847 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Hostnames",type=string,JSONPath=`.spec.hostnames` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// HTTPRoute provides a way to route HTTP requests. This includes the capability +// to match requests by hostname, path, header, or query param. Filters can be +// used to specify additional processing steps. 
Backends specify where matching +// requests should be routed. +type HTTPRoute struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of HTTPRoute. + // +required + Spec HTTPRouteSpec `json:"spec"` + + // Status defines the current state of HTTPRoute. + // +optional + Status HTTPRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRouteList contains a list of HTTPRoute. +type HTTPRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPRoute `json:"items"` +} + +// HTTPRouteSpec defines the desired state of HTTPRoute +type HTTPRouteSpec struct { + CommonRouteSpec `json:",inline"` + + // Hostnames defines a set of hostnames that should match against the HTTP Host + // header to select a HTTPRoute used to process the request. Implementations + // MUST ignore any port value specified in the HTTP Host header while + // performing a match and (absent of any applicable header modification + // configuration) MUST forward this header unmodified to the backend. + // + // Valid values for Hostnames are determined by RFC 1123 definition of a + // hostname with 2 notable exceptions: + // + // 1. IPs are not allowed. + // 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard + // label must appear by itself as the first label. + // + // If a hostname is specified by both the Listener and HTTPRoute, there + // must be at least one intersecting hostname for the HTTPRoute to be + // attached to the Listener. For example: + // + // * A Listener with `test.example.com` as the hostname matches HTTPRoutes + // that have either not specified any hostnames, or have specified at + // least one of `test.example.com` or `*.example.com`. 
+ // * A Listener with `*.example.com` as the hostname matches HTTPRoutes + // that have either not specified any hostnames or have specified at least + // one hostname that matches the Listener hostname. For example, + // `*.example.com`, `test.example.com`, and `foo.test.example.com` would + // all match. On the other hand, `example.com` and `test.example.net` would + // not match. + // + // Hostnames that are prefixed with a wildcard label (`*.`) are interpreted + // as a suffix match. That means that a match for `*.example.com` would match + // both `test.example.com`, and `foo.test.example.com`, but not `example.com`. + // + // If both the Listener and HTTPRoute have specified hostnames, any + // HTTPRoute hostnames that do not match the Listener hostname MUST be + // ignored. For example, if a Listener specified `*.example.com`, and the + // HTTPRoute specified `test.example.com` and `test.example.net`, + // `test.example.net` must not be considered for a match. + // + // If both the Listener and HTTPRoute have specified hostnames, and none + // match with the criteria above, then the HTTPRoute is not accepted. The + // implementation must raise an 'Accepted' Condition with a status of + // `False` in the corresponding RouteParentStatus. + // + // In the event that multiple HTTPRoutes specify intersecting hostnames (e.g. + // overlapping wildcard matching and exact matching hostnames), precedence must + // be given to rules from the HTTPRoute with the largest number of: + // + // * Characters in a matching non-wildcard hostname. + // * Characters in a matching hostname. + // + // If ties exist across multiple Routes, the matching precedence rules for + // HTTPRouteMatches takes over. + // + // Support: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + Hostnames []Hostname `json:"hostnames,omitempty"` + + // Rules are a list of HTTP matchers, filters and actions. 
+ // + // +optional + // +listType=atomic + // + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:default={{matches: {{path: {type: "PathPrefix", value: "/"}}}}} + // +kubebuilder:validation:XValidation:message="While 16 rules and 64 matches per rule are allowed, the total number of matches across all rules in a route must be less than 128",rule="(self.size() > 0 ? self[0].matches.size() : 0) + (self.size() > 1 ? self[1].matches.size() : 0) + (self.size() > 2 ? self[2].matches.size() : 0) + (self.size() > 3 ? self[3].matches.size() : 0) + (self.size() > 4 ? self[4].matches.size() : 0) + (self.size() > 5 ? self[5].matches.size() : 0) + (self.size() > 6 ? self[6].matches.size() : 0) + (self.size() > 7 ? self[7].matches.size() : 0) + (self.size() > 8 ? self[8].matches.size() : 0) + (self.size() > 9 ? self[9].matches.size() : 0) + (self.size() > 10 ? self[10].matches.size() : 0) + (self.size() > 11 ? self[11].matches.size() : 0) + (self.size() > 12 ? self[12].matches.size() : 0) + (self.size() > 13 ? self[13].matches.size() : 0) + (self.size() > 14 ? self[14].matches.size() : 0) + (self.size() > 15 ? self[15].matches.size() : 0) <= 128" + Rules []HTTPRouteRule `json:"rules,omitempty"` +} + +// HTTPRouteRule defines semantics for matching an HTTP request based on +// conditions (matches), processing it (filters), and forwarding the request to +// an API object (backendRefs). +// +// +kubebuilder:validation:XValidation:message="RequestRedirect filter must not be used together with backendRefs",rule="(has(self.backendRefs) && size(self.backendRefs) > 0) ? 
(!has(self.filters) || self.filters.all(f, !has(f.requestRedirect))): true" +// +kubebuilder:validation:XValidation:message="When using RequestRedirect filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.filters) && self.filters.exists_one(f, has(f.requestRedirect) && has(f.requestRedirect.path) && f.requestRedirect.path.type == 'ReplacePrefixMatch' && has(f.requestRedirect.path.replacePrefixMatch))) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +// +kubebuilder:validation:XValidation:message="When using URLRewrite filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.filters) && self.filters.exists_one(f, has(f.urlRewrite) && has(f.urlRewrite.path) && f.urlRewrite.path.type == 'ReplacePrefixMatch' && has(f.urlRewrite.path.replacePrefixMatch))) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +// +kubebuilder:validation:XValidation:message="Within backendRefs, when using RequestRedirect filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.requestRedirect) && has(f.requestRedirect.path) && f.requestRedirect.path.type == 'ReplacePrefixMatch' && has(f.requestRedirect.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? 
false : true) : true" +// +kubebuilder:validation:XValidation:message="Within backendRefs, When using URLRewrite filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.urlRewrite) && has(f.urlRewrite.path) && f.urlRewrite.path.type == 'ReplacePrefixMatch' && has(f.urlRewrite.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +type HTTPRouteRule struct { + // Name is the name of the route rule. This name MUST be unique within a Route if it is set. + // + // Support: Extended + // +optional + Name *SectionName `json:"name,omitempty"` + + // Matches define conditions used for matching the rule against incoming + // HTTP requests. Each match is independent, i.e. this rule will be matched + // if **any** one of the matches is satisfied. + // + // For example, take the following matches configuration: + // + // ``` + // matches: + // - path: + // value: "/foo" + // headers: + // - name: "version" + // value: "v2" + // - path: + // value: "/v2/foo" + // ``` + // + // For a request to match against this rule, a request must satisfy + // EITHER of the two conditions: + // + // - path prefixed with `/foo` AND contains the header `version: v2` + // - path prefix of `/v2/foo` + // + // See the documentation for HTTPRouteMatch on how to specify multiple + // match conditions that should be ANDed together. + // + // If no matches are specified, the default is a prefix + // path match on "/", which has the effect of matching every + // HTTP request. + // + // Proxy or Load Balancer routing configuration generated from HTTPRoutes + // MUST prioritize matches based on the following criteria, continuing on + // ties. 
Across all rules specified on applicable Routes, precedence must be + // given to the match having: + // + // * "Exact" path match. + // * "Prefix" path match with largest number of characters. + // * Method match. + // * Largest number of header matches. + // * Largest number of query param matches. + // + // Note: The precedence of RegularExpression path matches are implementation-specific. + // + // If ties still exist across multiple Routes, matching precedence MUST be + // determined in order of the following criteria, continuing on ties: + // + // * The oldest Route based on creation timestamp. + // * The Route appearing first in alphabetical order by + // "{namespace}/{name}". + // + // If ties still exist within an HTTPRoute, matching precedence MUST be granted + // to the FIRST matching rule (in list order) with a match meeting the above + // criteria. + // + // When no rules matching a request have been successfully attached to the + // parent a request is coming from, a HTTP 404 status code MUST be returned. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:default={{path:{ type: "PathPrefix", value: "/"}}} + Matches []HTTPRouteMatch `json:"matches,omitempty"` + + // Filters define the filters that are applied to requests that match + // this rule. + // + // Wherever possible, implementations SHOULD implement filters in the order + // they are specified. + // + // Implementations MAY choose to implement this ordering strictly, rejecting + // any combination or order of filters that cannot be supported. If implementations + // choose a strict interpretation of filter ordering, they MUST clearly document + // that behavior. + // + // To reject an invalid combination or order of filters, implementations SHOULD + // consider the Route Rules with this configuration invalid. If all Route Rules + // in a Route are invalid, the entire Route would be considered invalid. 
If only + // a portion of Route Rules are invalid, implementations MUST set the + // "PartiallyInvalid" condition for the Route. + // + // Conformance-levels at this level are defined based on the type of filter: + // + // - ALL core filters MUST be supported by all implementations. + // - Implementers are encouraged to support extended filters. + // - Implementation-specific custom filters have no API guarantees across + // implementations. + // + // Specifying the same filter multiple times is not supported unless explicitly + // indicated in the filter. + // + // All filters are expected to be compatible with each other except for the + // URLRewrite and RequestRedirect filters, which may not be combined. If an + // implementation cannot support other combinations of filters, they must clearly + // document that limitation. In cases where incompatible or unsupported + // filters are specified and cause the `Accepted` condition to be set to status + // `False`, implementations may use the `IncompatibleFilters` reason to specify + // this configuration error. 
+ // + // Support: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" + // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" + // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" + // +kubebuilder:validation:XValidation:message="RequestRedirect filter cannot be repeated",rule="self.filter(f, f.type == 'RequestRedirect').size() <= 1" + // +kubebuilder:validation:XValidation:message="URLRewrite filter cannot be repeated",rule="self.filter(f, f.type == 'URLRewrite').size() <= 1" + Filters []HTTPRouteFilter `json:"filters,omitempty"` + + // BackendRefs defines the backend(s) where matching requests should be + // sent. + // + // Failure behavior here depends on how many BackendRefs are specified and + // how many are invalid. + // + // If *all* entries in BackendRefs are invalid, and there are also no filters + // specified in this route rule, *all* traffic which matches this rule MUST + // receive a 500 status code. + // + // See the HTTPBackendRef definition for the rules about what makes a single + // HTTPBackendRef invalid. + // + // When a HTTPBackendRef is invalid, 500 status codes MUST be returned for + // requests that would have otherwise been routed to an invalid backend. If + // multiple backends are specified, and some are invalid, the proportion of + // requests that would otherwise have been routed to an invalid backend + // MUST receive a 500 status code. 
+ // + // For example, if two backends are specified with equal weights, and one is + // invalid, 50 percent of traffic must receive a 500. Implementations may + // choose how that 50 percent is determined. + // + // When a HTTPBackendRef refers to a Service that has no ready endpoints, + // implementations SHOULD return a 503 for requests to that backend instead. + // If an implementation chooses to do this, all of the above rules for 500 responses + // MUST also apply for responses that return a 503. + // + // Support: Core for Kubernetes Service + // + // Support: Extended for Kubernetes ServiceImport + // + // Support: Implementation-specific for any other resource + // + // Support for weight: Core + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + BackendRefs []HTTPBackendRef `json:"backendRefs,omitempty"` + + // Timeouts defines the timeouts that can be configured for an HTTP request. + // + // Support: Extended + // + // +optional + Timeouts *HTTPRouteTimeouts `json:"timeouts,omitempty"` + + // Retry defines the configuration for when to retry an HTTP request. + // + // Support: Extended + // + // +optional + // + Retry *HTTPRouteRetry `json:"retry,omitempty"` + + // SessionPersistence defines and configures session persistence + // for the route rule. + // + // Support: Extended + // + // +optional + // + SessionPersistence *SessionPersistence `json:"sessionPersistence,omitempty"` +} + +// HTTPRouteTimeouts defines timeouts that can be configured for an HTTPRoute. +// Timeout values are represented with Gateway API Duration formatting. +// +// +kubebuilder:validation:XValidation:message="backendRequest timeout cannot be longer than request timeout",rule="!(has(self.request) && has(self.backendRequest) && duration(self.request) != duration('0s') && duration(self.backendRequest) > duration(self.request))" +type HTTPRouteTimeouts struct { + // Request specifies the maximum duration for a gateway to respond to an HTTP request. 
+ // If the gateway has not been able to respond before this deadline is met, the gateway + // MUST return a timeout error. + // + // For example, setting the `rules.timeouts.request` field to the value `10s` in an + // `HTTPRoute` will cause a timeout if a client request is taking longer than 10 seconds + // to complete. + // + // Setting a timeout to the zero duration (e.g. "0s") SHOULD disable the timeout + // completely. Implementations that cannot completely disable the timeout MUST + // instead interpret the zero duration as the longest possible value to which + // the timeout can be set. + // + // This timeout is intended to cover as close to the whole request-response transaction + // as possible although an implementation MAY choose to start the timeout after the entire + // request stream has been received instead of immediately after the transaction is + // initiated by the client. + // + // The value of Request is a Gateway API Duration string as defined by GEP-2257. When this + // field is unspecified, request timeout behavior is implementation-specific. + // + // Support: Extended + // + // +optional + Request *Duration `json:"request,omitempty"` + + // BackendRequest specifies a timeout for an individual request from the gateway + // to a backend. This covers the time from when the request first starts being + // sent from the gateway to when the full response has been received from the backend. + // + // Setting a timeout to the zero duration (e.g. "0s") SHOULD disable the timeout + // completely. Implementations that cannot completely disable the timeout MUST + // instead interpret the zero duration as the longest possible value to which + // the timeout can be set. + // + // An entire client HTTP transaction with a gateway, covered by the Request timeout, + // may result in more than one call from the gateway to the destination backend, + // for example, if automatic retries are supported. 
+ // + // The value of BackendRequest must be a Gateway API Duration string as defined by + // GEP-2257. When this field is unspecified, its behavior is implementation-specific; + // when specified, the value of BackendRequest must be no more than the value of the + // Request timeout (since the Request timeout encompasses the BackendRequest timeout). + // + // Support: Extended + // + // +optional + BackendRequest *Duration `json:"backendRequest,omitempty"` +} + +// HTTPRouteRetry defines retry configuration for an HTTPRoute. +// +// Implementations SHOULD retry on connection errors (disconnect, reset, timeout, +// TCP failure) if a retry stanza is configured. +type HTTPRouteRetry struct { + // Codes defines the HTTP response status codes for which a backend request + // should be retried. + // + // Support: Extended + // + // +optional + // +listType=atomic + Codes []HTTPRouteRetryStatusCode `json:"codes,omitempty"` + + // Attempts specifies the maximum number of times an individual request + // from the gateway to a backend should be retried. + // + // If the maximum number of retries has been attempted without a successful + // response from the backend, the Gateway MUST return an error. + // + // When this field is unspecified, the number of times to attempt to retry + // a backend request is implementation-specific. + // + // Support: Extended + // + // +optional + Attempts *int `json:"attempts,omitempty"` + + // Backoff specifies the minimum duration a Gateway should wait between + // retry attempts and is represented in Gateway API Duration formatting. + // + // For example, setting the `rules[].retry.backoff` field to the value + // `100ms` will cause a backend request to first be retried approximately + // 100 milliseconds after timing out or receiving a response code configured + // to be retryable. 
+ // + // An implementation MAY use an exponential or alternative backoff strategy + // for subsequent retry attempts, MAY cap the maximum backoff duration to + // some amount greater than the specified minimum, and MAY add arbitrary + // jitter to stagger requests, as long as unsuccessful backend requests are + // not retried before the configured minimum duration. + // + // If a Request timeout (`rules[].timeouts.request`) is configured on the + // route, the entire duration of the initial request and any retry attempts + // MUST not exceed the Request timeout duration. If any retry attempts are + // still in progress when the Request timeout duration has been reached, + // these SHOULD be canceled if possible and the Gateway MUST immediately + // return a timeout error. + // + // If a BackendRequest timeout (`rules[].timeouts.backendRequest`) is + // configured on the route, any retry attempts which reach the configured + // BackendRequest timeout duration without a response SHOULD be canceled if + // possible and the Gateway should wait for at least the specified backoff + // duration before attempting to retry the backend request again. + // + // If a BackendRequest timeout is _not_ configured on the route, retry + // attempts MAY time out after an implementation default duration, or MAY + // remain pending until a configured Request timeout or implementation + // default duration for total request time is reached. + // + // When this field is unspecified, the time to wait between retry attempts + // is implementation-specific. + // + // Support: Extended + // + // +optional + Backoff *Duration `json:"backoff,omitempty"` +} + +// HTTPRouteRetryStatusCode defines an HTTP response status code for +// which a backend request should be retried. +// +// Implementations MUST support the following status codes as retryable: +// +// * 500 +// * 502 +// * 503 +// * 504 +// +// Implementations MAY support specifying additional discrete values in the +// 500-599 range. 
+// +// Implementations MAY support specifying discrete values in the 400-499 range, +// which are often inadvisable to retry. +// +// +kubebuilder:validation:Minimum:=400 +// +kubebuilder:validation:Maximum:=599 +// +type HTTPRouteRetryStatusCode int + +// PathMatchType specifies the semantics of how HTTP paths should be compared. +// Valid PathMatchType values, along with their support levels, are: +// +// * "Exact" - Core +// * "PathPrefix" - Core +// * "RegularExpression" - Implementation Specific +// +// PathPrefix and Exact paths must be syntactically valid: +// +// - Must begin with the `/` character +// - Must not contain consecutive `/` characters (e.g. `/foo///`, `//`). +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;PathPrefix;RegularExpression +type PathMatchType string + +const ( + // Matches the URL path exactly and with case sensitivity. This means that + // an exact path match on `/abc` will only match requests to `/abc`, NOT + // `/abc/`, `/Abc`, or `/abcd`. + PathMatchExact PathMatchType = "Exact" + + // Matches based on a URL path prefix split by `/`. Matching is + // case-sensitive and done on a path element by element basis. A + // path element refers to the list of labels in the path split by + // the `/` separator. When specified, a trailing `/` is ignored. + // + // For example, the paths `/abc`, `/abc/`, and `/abc/def` would all match + // the prefix `/abc`, but the path `/abcd` would not. + // + // "PathPrefix" is semantically equivalent to the "Prefix" path type in the + // Kubernetes Ingress API. + PathMatchPathPrefix PathMatchType = "PathPrefix" + + // Matches if the URL path matches the given regular expression with + // case sensitivity. 
+ // + // Since `"RegularExpression"` has implementation-specific conformance, + // implementations can support POSIX, PCRE, RE2 or any other regular expression + // dialect. + // Please read the implementation's documentation to determine the supported + // dialect. + PathMatchRegularExpression PathMatchType = "RegularExpression" +) + +// HTTPPathMatch describes how to select a HTTP route by matching the HTTP request path. +// +// +kubebuilder:validation:XValidation:message="value must be an absolute path and start with '/' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? self.value.startsWith('/') : true" +// +kubebuilder:validation:XValidation:message="must not contain '//' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('//') : true" +// +kubebuilder:validation:XValidation:message="must not contain '/./' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('/./') : true" +// +kubebuilder:validation:XValidation:message="must not contain '/../' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('/../') : true" +// +kubebuilder:validation:XValidation:message="must not contain '%2f' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('%2f') : true" +// +kubebuilder:validation:XValidation:message="must not contain '%2F' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('%2F') : true" +// +kubebuilder:validation:XValidation:message="must not contain '#' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('#') : true" +// +kubebuilder:validation:XValidation:message="must not end with '/..' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? 
!self.value.endsWith('/..') : true" +// +kubebuilder:validation:XValidation:message="must not end with '/.' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.endsWith('/.') : true" +// +kubebuilder:validation:XValidation:message="type must be one of ['Exact', 'PathPrefix', 'RegularExpression']",rule="self.type in ['Exact','PathPrefix'] || self.type == 'RegularExpression'" +// +kubebuilder:validation:XValidation:message="must only contain valid characters (matching ^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$) for types ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? self.value.matches(r\"\"\"^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$\"\"\") : true" +type HTTPPathMatch struct { + // Type specifies how to match against the path Value. + // + // Support: Core (Exact, PathPrefix) + // + // Support: Implementation-specific (RegularExpression) + // + // +optional + // +kubebuilder:default=PathPrefix + Type *PathMatchType `json:"type,omitempty"` + + // Value of the HTTP path to match against. + // + // +optional + // +kubebuilder:default="/" + // +kubebuilder:validation:MaxLength=1024 + Value *string `json:"value,omitempty"` +} + +// HeaderMatchType specifies the semantics of how HTTP header values should be +// compared. Valid HeaderMatchType values, along with their conformance levels, are: +// +// * "Exact" - Core +// * "RegularExpression" - Implementation Specific +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;RegularExpression +type HeaderMatchType string + +// HeaderMatchType constants. 
+const ( + HeaderMatchExact HeaderMatchType = "Exact" + HeaderMatchRegularExpression HeaderMatchType = "RegularExpression" +) + +// HTTPHeaderName is the name of an HTTP header. +// +// Valid values include: +// +// * "Authorization" +// * "Set-Cookie" +// +// Invalid values include: +// +// - ":method" - ":" is an invalid character. This means that HTTP/2 pseudo +// headers are not currently supported by this type. +// - "/invalid" - "/ " is an invalid character +type HTTPHeaderName HeaderName + +// HTTPHeaderMatch describes how to select a HTTP route by matching HTTP request +// headers. +type HTTPHeaderMatch struct { + // Type specifies how to match against the value of the header. + // + // Support: Core (Exact) + // + // Support: Implementation-specific (RegularExpression) + // + // Since RegularExpression HeaderMatchType has implementation-specific + // conformance, implementations can support POSIX, PCRE or any other dialects + // of regular expressions. Please read the implementation's documentation to + // determine the supported dialect. + // + // +optional + // +kubebuilder:default=Exact + Type *HeaderMatchType `json:"type,omitempty"` + + // Name is the name of the HTTP Header to be matched. Name matching MUST be + // case-insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // + // If multiple entries specify equivalent header names, only the first + // entry with an equivalent name MUST be considered for a match. Subsequent + // entries with an equivalent header name MUST be ignored. Due to the + // case-insensitivity of header names, "foo" and "Foo" are considered + // equivalent. + // + // When a header is repeated in an HTTP request, it is + // implementation-specific behavior as to how this is represented. + // Generally, proxies should follow the guidance from the RFC: + // https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 regarding + // processing a repeated header, with special handling for "Set-Cookie". 
+ // +required + Name HTTPHeaderName `json:"name"` + + // Value is the value of HTTP Header to be matched. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + // +required + Value string `json:"value"` +} + +// QueryParamMatchType specifies the semantics of how HTTP query parameter +// values should be compared. Valid QueryParamMatchType values, along with their +// conformance levels, are: +// +// * "Exact" - Core +// * "RegularExpression" - Implementation Specific +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;RegularExpression +type QueryParamMatchType string + +// QueryParamMatchType constants. +const ( + QueryParamMatchExact QueryParamMatchType = "Exact" + QueryParamMatchRegularExpression QueryParamMatchType = "RegularExpression" +) + +// HTTPQueryParamMatch describes how to select a HTTP route by matching HTTP +// query parameters. +type HTTPQueryParamMatch struct { + // Type specifies how to match against the value of the query parameter. + // + // Support: Extended (Exact) + // + // Support: Implementation-specific (RegularExpression) + // + // Since RegularExpression QueryParamMatchType has Implementation-specific + // conformance, implementations can support POSIX, PCRE or any other + // dialects of regular expressions. Please read the implementation's + // documentation to determine the supported dialect. + // + // +optional + // +kubebuilder:default=Exact + Type *QueryParamMatchType `json:"type,omitempty"` + + // Name is the name of the HTTP query param to be matched. This must be an + // exact string match. (See + // https://tools.ietf.org/html/rfc7230#section-2.7.3). 
+ // + // If multiple entries specify equivalent query param names, only the first + // entry with an equivalent name MUST be considered for a match. Subsequent + // entries with an equivalent query param name MUST be ignored. + // + // If a query param is repeated in an HTTP request, the behavior is + // purposely left undefined, since different data planes have different + // capabilities. However, it is *recommended* that implementations should + // match against the first value of the param if the data plane supports it, + // as this behavior is expected in other load balancing contexts outside of + // the Gateway API. + // + // Users SHOULD NOT route traffic based on repeated query params to guard + // themselves against potential differences in the implementations. + // +required + Name HTTPHeaderName `json:"name"` + + // Value is the value of HTTP query param to be matched. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +required + Value string `json:"value"` +} + +// HTTPMethod describes how to select a HTTP route by matching the HTTP +// method as defined by +// [RFC 7231](https://datatracker.ietf.org/doc/html/rfc7231#section-4) and +// [RFC 5789](https://datatracker.ietf.org/doc/html/rfc5789#section-2). +// The value is expected in upper case. +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. 
+// +// +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH +type HTTPMethod string + +// +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH;* +type HTTPMethodWithWildcard string + +const ( + HTTPMethodGet HTTPMethod = "GET" + HTTPMethodHead HTTPMethod = "HEAD" + HTTPMethodPost HTTPMethod = "POST" + HTTPMethodPut HTTPMethod = "PUT" + HTTPMethodDelete HTTPMethod = "DELETE" + HTTPMethodConnect HTTPMethod = "CONNECT" + HTTPMethodOptions HTTPMethod = "OPTIONS" + HTTPMethodTrace HTTPMethod = "TRACE" + HTTPMethodPatch HTTPMethod = "PATCH" +) + +// HTTPRouteMatch defines the predicate used to match requests to a given +// action. Multiple match types are ANDed together, i.e. the match will +// evaluate to true only if all conditions are satisfied. +// +// For example, the match below will match a HTTP request only if its path +// starts with `/foo` AND it contains the `version: v1` header: +// +// ``` +// match: +// +// path: +// value: "/foo" +// headers: +// - name: "version" +// value "v1" +// +// ``` +type HTTPRouteMatch struct { + // Path specifies a HTTP request path matcher. If this field is not + // specified, a default prefix match on the "/" path is provided. + // + // +optional + // +kubebuilder:default={type: "PathPrefix", value: "/"} + Path *HTTPPathMatch `json:"path,omitempty"` + + // Headers specifies HTTP request header matchers. Multiple match values are + // ANDed together, meaning, a request must match all the specified headers + // to select the route. + // + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=16 + Headers []HTTPHeaderMatch `json:"headers,omitempty"` + + // QueryParams specifies HTTP query parameter matchers. Multiple match + // values are ANDed together, meaning, a request must match all the + // specified query parameters to select the route. 
+ // + // Support: Extended + // + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=16 + QueryParams []HTTPQueryParamMatch `json:"queryParams,omitempty"` + + // Method specifies HTTP method matcher. + // When specified, this route will be matched only if the request has the + // specified method. + // + // Support: Extended + // + // +optional + Method *HTTPMethod `json:"method,omitempty"` +} + +// HTTPRouteFilter defines processing steps that must be completed during the +// request or response lifecycle. HTTPRouteFilters are meant as an extension +// point to express processing that may be done in Gateway implementations. Some +// examples include request or response modification, implementing +// authentication strategies, rate-limiting, and traffic shaping. API +// guarantee/conformance is defined based on the type of the filter. +// +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be nil if the filter.type is not RequestHeaderModifier",rule="!(has(self.requestHeaderModifier) && self.type != 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be specified for RequestHeaderModifier filter.type",rule="!(!has(self.requestHeaderModifier) && self.type == 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be nil if the filter.type is not ResponseHeaderModifier",rule="!(has(self.responseHeaderModifier) && self.type != 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be specified for ResponseHeaderModifier filter.type",rule="!(!has(self.responseHeaderModifier) && self.type == 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be nil if the filter.type is not RequestMirror",rule="!(has(self.requestMirror) && self.type != 'RequestMirror')" +// 
+kubebuilder:validation:XValidation:message="filter.requestMirror must be specified for RequestMirror filter.type",rule="!(!has(self.requestMirror) && self.type == 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.requestRedirect must be nil if the filter.type is not RequestRedirect",rule="!(has(self.requestRedirect) && self.type != 'RequestRedirect')" +// +kubebuilder:validation:XValidation:message="filter.requestRedirect must be specified for RequestRedirect filter.type",rule="!(!has(self.requestRedirect) && self.type == 'RequestRedirect')" +// +kubebuilder:validation:XValidation:message="filter.urlRewrite must be nil if the filter.type is not URLRewrite",rule="!(has(self.urlRewrite) && self.type != 'URLRewrite')" +// +kubebuilder:validation:XValidation:message="filter.urlRewrite must be specified for URLRewrite filter.type",rule="!(!has(self.urlRewrite) && self.type == 'URLRewrite')" +// +// +// +// +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be nil if the filter.type is not ExtensionRef",rule="!(has(self.extensionRef) && self.type != 'ExtensionRef')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be specified for ExtensionRef filter.type",rule="!(!has(self.extensionRef) && self.type == 'ExtensionRef')" +type HTTPRouteFilter struct { + // Type identifies the type of filter to apply. As with other API fields, + // types are classified into three conformance levels: + // + // - Core: Filter types and their corresponding configuration defined by + // "Support: Core" in this package, e.g. "RequestHeaderModifier". All + // implementations must support core filters. + // + // - Extended: Filter types and their corresponding configuration defined by + // "Support: Extended" in this package, e.g. "RequestMirror". Implementers + // are encouraged to support extended filters. + // + // - Implementation-specific: Filters that are defined and supported by + // specific vendors. 
+ // In the future, filters showing convergence in behavior across multiple + // implementations will be considered for inclusion in extended or core + // conformance levels. Filter-specific configuration for such filters + // is specified using the ExtensionRef field. `Type` should be set to + // "ExtensionRef" for custom filters. + // + // Implementers are encouraged to define custom implementation types to + // extend the core API with implementation-specific behavior. + // + // If a reference to a custom filter type cannot be resolved, the filter + // MUST NOT be skipped. Instead, requests that would have been processed by + // that filter MUST receive a HTTP error response. + // + // Note that values may be added to this enum, implementations + // must ensure that unknown values will not cause a crash. + // + // Unknown values here must result in the implementation setting the + // Accepted Condition for the Route to `status: False`, with a + // Reason of `UnsupportedValue`. + // + // +unionDiscriminator + // +kubebuilder:validation:Enum=RequestHeaderModifier;ResponseHeaderModifier;RequestMirror;RequestRedirect;URLRewrite;ExtensionRef + // + // +required + Type HTTPRouteFilterType `json:"type"` + + // RequestHeaderModifier defines a schema for a filter that modifies request + // headers. + // + // Support: Core + // + // +optional + RequestHeaderModifier *HTTPHeaderFilter `json:"requestHeaderModifier,omitempty"` + + // ResponseHeaderModifier defines a schema for a filter that modifies response + // headers. + // + // Support: Extended + // + // +optional + ResponseHeaderModifier *HTTPHeaderFilter `json:"responseHeaderModifier,omitempty"` + + // RequestMirror defines a schema for a filter that mirrors requests. + // Requests are sent to the specified destination, but responses from + // that destination are ignored. + // + // This filter can be used multiple times within the same rule. 
Note that + // not all implementations will be able to support mirroring to multiple + // backends. + // + // Support: Extended + // + // +optional + // + // +kubebuilder:validation:XValidation:message="Only one of percent or fraction may be specified in HTTPRequestMirrorFilter",rule="!(has(self.percent) && has(self.fraction))" + RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"` + + // RequestRedirect defines a schema for a filter that responds to the + // request with an HTTP redirection. + // + // Support: Core + // + // +optional + RequestRedirect *HTTPRequestRedirectFilter `json:"requestRedirect,omitempty"` + + // URLRewrite defines a schema for a filter that modifies a request during forwarding. + // + // Support: Extended + // + // +optional + URLRewrite *HTTPURLRewriteFilter `json:"urlRewrite,omitempty"` + + // CORS defines a schema for a filter that responds to the + // cross-origin request based on HTTP response header. + // + // Support: Extended + // + // +optional + // + CORS *HTTPCORSFilter `json:"cors,omitempty"` + + // ExternalAuth configures settings related to sending request details + // to an external auth service. The external service MUST authenticate + // the request, and MAY authorize the request as well. + // + // If there is any problem communicating with the external service, + // this filter MUST fail closed. + // + // Support: Extended + // + // +optional + // + ExternalAuth *HTTPExternalAuthFilter `json:"externalAuth,omitempty"` + + // ExtensionRef is an optional, implementation-specific extension to the + // "filter" behavior. For example, resource "myroutefilter" in group + // "networking.example.net"). ExtensionRef MUST NOT be used for core and + // extended filters. + // + // This filter can be used multiple times within the same rule. 
+ // + // Support: Implementation-specific + // + // +optional + ExtensionRef *LocalObjectReference `json:"extensionRef,omitempty"` +} + +// HTTPRouteFilterType identifies a type of HTTPRoute filter. +type HTTPRouteFilterType string + +const ( + // HTTPRouteFilterRequestHeaderModifier can be used to add or remove an HTTP + // header from an HTTP request before it is sent to the upstream target. + // + // Support in HTTPRouteRule: Core + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterRequestHeaderModifier HTTPRouteFilterType = "RequestHeaderModifier" + + // HTTPRouteFilterResponseHeaderModifier can be used to add or remove an HTTP + // header from an HTTP response before it is sent to the client. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterResponseHeaderModifier HTTPRouteFilterType = "ResponseHeaderModifier" + + // HTTPRouteFilterRequestRedirect can be used to redirect a request to + // another location. This filter can also be used for HTTP to HTTPS + // redirects. This may not be used on the same Route rule or BackendRef as a + // URLRewrite filter. + // + // Support in HTTPRouteRule: Core + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterRequestRedirect HTTPRouteFilterType = "RequestRedirect" + + // HTTPRouteFilterURLRewrite can be used to modify a request during + // forwarding. At most one of these filters may be used on a Route rule. + // This may not be used on the same Route rule or BackendRef as a + // RequestRedirect filter. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterURLRewrite HTTPRouteFilterType = "URLRewrite" + + // HTTPRouteFilterRequestMirror can be used to mirror HTTP requests to a + // different backend. The responses from this backend MUST be ignored by + // the Gateway. 
+ // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterRequestMirror HTTPRouteFilterType = "RequestMirror" + + // HTTPRouteFilterCORS can be used to add CORS headers to an + // HTTP response before it is sent to the client. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + // + HTTPRouteFilterCORS HTTPRouteFilterType = "CORS" + + // HTTPRouteFilterExternalAuth can be used to configure a Gateway implementation + // to call out to an external Auth server, which MUST perform Authentication + // and MAY perform Authorization on the matched request before the request + // is forwarded to the backend. + // + // Support in HTTPRouteRule: Extended + // + // Feature Name: HTTPRouteExternalAuth + // + // + HTTPRouteFilterExternalAuth HTTPRouteFilterType = "ExternalAuth" + + // HTTPRouteFilterExtensionRef should be used for configuring custom + // HTTP filters. + // + // Support in HTTPRouteRule: Implementation-specific + // + // Support in HTTPBackendRef: Implementation-specific + HTTPRouteFilterExtensionRef HTTPRouteFilterType = "ExtensionRef" +) + +// HTTPHeader represents an HTTP Header name and value as defined by RFC 7230. +type HTTPHeader struct { + // Name is the name of the HTTP Header to be matched. Name matching MUST be + // case-insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // + // If multiple entries specify equivalent header names, the first entry with + // an equivalent name MUST be considered for a match. Subsequent entries + // with an equivalent header name MUST be ignored. Due to the + // case-insensitivity of header names, "foo" and "Foo" are considered + // equivalent. + // +required + Name HTTPHeaderName `json:"name"` + + // Value is the value of HTTP Header to be matched. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + // +required + Value string `json:"value"` +} + +// HTTPHeaderFilter defines a filter that modifies the headers of an HTTP +// request or response. Only one action for a given header name is +// permitted. Filters specifying multiple actions of the same or different +// type for any one header name are invalid. Configuration to set or add +// multiple values for a header must use RFC 7230 header value formatting, +// separating each value with a comma. +type HTTPHeaderFilter struct { + // Set overwrites the request with the given header (name, value) + // before the action. + // + // Input: + // GET /foo HTTP/1.1 + // my-header: foo + // + // Config: + // set: + // - name: "my-header" + // value: "bar" + // + // Output: + // GET /foo HTTP/1.1 + // my-header: bar + // + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=16 + Set []HTTPHeader `json:"set,omitempty"` + + // Add adds the given header(s) (name, value) to the request + // before the action. It appends to any existing values associated + // with the header name. + // + // Input: + // GET /foo HTTP/1.1 + // my-header: foo + // + // Config: + // add: + // - name: "my-header" + // value: "bar,baz" + // + // Output: + // GET /foo HTTP/1.1 + // my-header: foo,bar,baz + // + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=16 + Add []HTTPHeader `json:"add,omitempty"` + + // Remove the given header(s) from the HTTP request before the action. The + // value of Remove is a list of HTTP header names. Note that the header + // names are case-insensitive (see + // https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). 
+ // + // Input: + // GET /foo HTTP/1.1 + // my-header1: foo + // my-header2: bar + // my-header3: baz + // + // Config: + // remove: ["my-header1", "my-header3"] + // + // Output: + // GET /foo HTTP/1.1 + // my-header2: bar + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=16 + Remove []string `json:"remove,omitempty"` +} + +// HTTPPathModifierType defines the type of path redirect or rewrite. +type HTTPPathModifierType string + +const ( + // This type of modifier indicates that the full path will be replaced + // by the specified value. + FullPathHTTPPathModifier HTTPPathModifierType = "ReplaceFullPath" + + // This type of modifier indicates that any prefix path matches will be + // replaced by the substitution value. For example, a path with a prefix + // match of "/foo" and a ReplacePrefixMatch substitution of "/bar" will have + // the "/foo" prefix replaced with "/bar" in matching requests. + // + // Note that this matches the behavior of the PathPrefix match type. This + // matches full path elements. A path element refers to the list of labels + // in the path split by the `/` separator. When specified, a trailing `/` is + // ignored. For example, the paths `/abc`, `/abc/`, and `/abc/def` would all + // match the prefix `/abc`, but the path `/abcd` would not. + PrefixMatchHTTPPathModifier HTTPPathModifierType = "ReplacePrefixMatch" +) + +// HTTPPathModifier defines configuration for path modifiers. +// +// +kubebuilder:validation:XValidation:message="replaceFullPath must be specified when type is set to 'ReplaceFullPath'",rule="self.type == 'ReplaceFullPath' ? has(self.replaceFullPath) : true" +// +kubebuilder:validation:XValidation:message="type must be 'ReplaceFullPath' when replaceFullPath is set",rule="has(self.replaceFullPath) ? 
self.type == 'ReplaceFullPath' : true" +// +kubebuilder:validation:XValidation:message="replacePrefixMatch must be specified when type is set to 'ReplacePrefixMatch'",rule="self.type == 'ReplacePrefixMatch' ? has(self.replacePrefixMatch) : true" +// +kubebuilder:validation:XValidation:message="type must be 'ReplacePrefixMatch' when replacePrefixMatch is set",rule="has(self.replacePrefixMatch) ? self.type == 'ReplacePrefixMatch' : true" +type HTTPPathModifier struct { + // Type defines the type of path modifier. Additional types may be + // added in a future release of the API. + // + // Note that values may be added to this enum, implementations + // must ensure that unknown values will not cause a crash. + // + // Unknown values here must result in the implementation setting the + // Accepted Condition for the Route to `status: False`, with a + // Reason of `UnsupportedValue`. + // + // +kubebuilder:validation:Enum=ReplaceFullPath;ReplacePrefixMatch + // +required + Type HTTPPathModifierType `json:"type"` + + // ReplaceFullPath specifies the value with which to replace the full path + // of a request during a rewrite or redirect. + // + // +kubebuilder:validation:MaxLength=1024 + // +optional + ReplaceFullPath *string `json:"replaceFullPath,omitempty"` + + // ReplacePrefixMatch specifies the value with which to replace the prefix + // match of a request during a rewrite or redirect. For example, a request + // to "/foo/bar" with a prefix match of "/foo" and a ReplacePrefixMatch + // of "/xyz" would be modified to "/xyz/bar". + // + // Note that this matches the behavior of the PathPrefix match type. This + // matches full path elements. A path element refers to the list of labels + // in the path split by the `/` separator. When specified, a trailing `/` is + // ignored. For example, the paths `/abc`, `/abc/`, and `/abc/def` would all + // match the prefix `/abc`, but the path `/abcd` would not. 
+ // + // ReplacePrefixMatch is only compatible with a `PathPrefix` HTTPRouteMatch. + // Using any other HTTPRouteMatch type on the same HTTPRouteRule will result in + // the implementation setting the Accepted Condition for the Route to `status: False`. + // + // Request Path | Prefix Match | Replace Prefix | Modified Path + // -------------|--------------|----------------|---------- + // /foo/bar | /foo | /xyz | /xyz/bar + // /foo/bar | /foo | /xyz/ | /xyz/bar + // /foo/bar | /foo/ | /xyz | /xyz/bar + // /foo/bar | /foo/ | /xyz/ | /xyz/bar + // /foo | /foo | /xyz | /xyz + // /foo/ | /foo | /xyz | /xyz/ + // /foo/bar | /foo | | /bar + // /foo/ | /foo | | / + // /foo | /foo | | / + // /foo/ | /foo | / | / + // /foo | /foo | / | / + // + // +kubebuilder:validation:MaxLength=1024 + // +optional + ReplacePrefixMatch *string `json:"replacePrefixMatch,omitempty"` +} + +// HTTPRequestRedirect defines a filter that redirects a request. This filter +// MUST NOT be used on the same Route rule as a HTTPURLRewrite filter. +type HTTPRequestRedirectFilter struct { + // Scheme is the scheme to be used in the value of the `Location` header in + // the response. When empty, the scheme of the request is used. + // + // Scheme redirects can affect the port of the redirect, for more information, + // refer to the documentation for the port field of this filter. + // + // Note that values may be added to this enum, implementations + // must ensure that unknown values will not cause a crash. + // + // Unknown values here must result in the implementation setting the + // Accepted Condition for the Route to `status: False`, with a + // Reason of `UnsupportedValue`. + // + // Support: Extended + // + // +optional + // +kubebuilder:validation:Enum=http;https + Scheme *string `json:"scheme,omitempty"` + + // Hostname is the hostname to be used in the value of the `Location` + // header in the response. + // When empty, the hostname in the `Host` header of the request is used. 
+ // + // Support: Core + // + // +optional + Hostname *PreciseHostname `json:"hostname,omitempty"` + + // Path defines parameters used to modify the path of the incoming request. + // The modified path is then used to construct the `Location` header. When + // empty, the request path is used as-is. + // + // Support: Extended + // + // +optional + Path *HTTPPathModifier `json:"path,omitempty"` + + // Port is the port to be used in the value of the `Location` + // header in the response. + // + // If no port is specified, the redirect port MUST be derived using the + // following rules: + // + // * If redirect scheme is not-empty, the redirect port MUST be the well-known + // port associated with the redirect scheme. Specifically "http" to port 80 + // and "https" to port 443. If the redirect scheme does not have a + // well-known port, the listener port of the Gateway SHOULD be used. + // * If redirect scheme is empty, the redirect port MUST be the Gateway + // Listener port. + // + // Implementations SHOULD NOT add the port number in the 'Location' + // header in the following cases: + // + // * A Location header that will use HTTP (whether that is determined via + // the Listener protocol or the Scheme field) _and_ use port 80. + // * A Location header that will use HTTPS (whether that is determined via + // the Listener protocol or the Scheme field) _and_ use port 443. + // + // Support: Extended + // + // +optional + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port *PortNumber `json:"port,omitempty"` + + // StatusCode is the HTTP status code to be used in response. + // + // Note that values may be added to this enum, implementations + // must ensure that unknown values will not cause a crash. + // + // Unknown values here must result in the implementation setting the + // Accepted Condition for the Route to `status: False`, with a + // Reason of `UnsupportedValue`. 
+ // + // Support: Core + // + // +optional + // +kubebuilder:default=302 + // +kubebuilder:validation:Enum=301;302 + StatusCode *int `json:"statusCode,omitempty"` +} + +// HTTPURLRewriteFilter defines a filter that modifies a request during +// forwarding. At most one of these filters may be used on a Route rule. This +// MUST NOT be used on the same Route rule as a HTTPRequestRedirect filter. +// +// Support: Extended +type HTTPURLRewriteFilter struct { + // Hostname is the value to be used to replace the Host header value during + // forwarding. + // + // Support: Extended + // + // +optional + Hostname *PreciseHostname `json:"hostname,omitempty"` + + // Path defines a path rewrite. + // + // Support: Extended + // + // +optional + Path *HTTPPathModifier `json:"path,omitempty"` +} + +// HTTPRequestMirrorFilter defines configuration for the RequestMirror filter. +type HTTPRequestMirrorFilter struct { + // BackendRef references a resource where mirrored requests are sent. + // + // Mirrored requests must be sent only to a single destination endpoint + // within this BackendRef, irrespective of how many endpoints are present + // within this BackendRef. + // + // If the referent cannot be found, this BackendRef is invalid and must be + // dropped from the Gateway. The controller must ensure the "ResolvedRefs" + // condition on the Route status is set to `status: False` and not configure + // this backend in the underlying implementation. + // + // If there is a cross-namespace reference to an *existing* object + // that is not allowed by a ReferenceGrant, the controller must ensure the + // "ResolvedRefs" condition on the Route is set to `status: False`, + // with the "RefNotPermitted" reason and not configure this backend in the + // underlying implementation. + // + // In either error case, the Message of the `ResolvedRefs` Condition + // should be used to provide more detail about the problem. 
+ // + // Support: Extended for Kubernetes Service + // + // Support: Implementation-specific for any other resource + // +required + BackendRef BackendObjectReference `json:"backendRef"` + + // Percent represents the percentage of requests that should be + // mirrored to BackendRef. Its minimum value is 0 (indicating 0% of + // requests) and its maximum value is 100 (indicating 100% of requests). + // + // Only one of Fraction or Percent may be specified. If neither field + // is specified, 100% of requests will be mirrored. + // + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=100 + Percent *int32 `json:"percent,omitempty"` + + // Fraction represents the fraction of requests that should be + // mirrored to BackendRef. + // + // Only one of Fraction or Percent may be specified. If neither field + // is specified, 100% of requests will be mirrored. + // + // +optional + Fraction *Fraction `json:"fraction,omitempty"` +} + +// HTTPCORSFilter defines a filter that that configures Cross-Origin Request +// Sharing (CORS). +type HTTPCORSFilter struct { + // AllowOrigins indicates whether the response can be shared with requested + // resource from the given `Origin`. + // + // The `Origin` consists of a scheme and a host, with an optional port, and + // takes the form `://(:)`. + // + // Valid values for scheme are: `http` and `https`. + // + // Valid values for port are any integer between 1 and 65535 (the list of + // available TCP/UDP ports). Note that, if not included, port `80` is + // assumed for `http` scheme origins, and port `443` is assumed for `https` + // origins. This may affect origin matching. + // + // The host part of the origin may contain the wildcard character `*`. These + // wildcard characters behave as follows: + // + // * `*` is a greedy match to the _left_, including any number of + // DNS labels to the left of its position. 
This also means that + // `*` will include any number of period `.` characters to the + // left of its position. + // * A wildcard by itself matches all hosts. + // + // An origin value that includes _only_ the `*` character indicates requests + // from all `Origin`s are allowed. + // + // When the `AllowOrigins` field is configured with multiple origins, it + // means the server supports clients from multiple origins. If the request + // `Origin` matches the configured allowed origins, the gateway must return + // the given `Origin` and sets value of the header + // `Access-Control-Allow-Origin` same as the `Origin` header provided by the + // client. + // + // The status code of a successful response to a "preflight" request is + // always an OK status (i.e., 204 or 200). + // + // If the request `Origin` does not match the configured allowed origins, + // the gateway returns 204/200 response but doesn't set the relevant + // cross-origin response headers. Alternatively, the gateway responds with + // 403 status to the "preflight" request is denied, coupled with omitting + // the CORS headers. The cross-origin request fails on the client side. + // Therefore, the client doesn't attempt the actual cross-origin request. + // + // The `Access-Control-Allow-Origin` response header can only use `*` + // wildcard as value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and `AllowOrigins` field + // specified with the `*` wildcard, the gateway must return a single origin + // in the value of the `Access-Control-Allow-Origin` response header, + // instead of specifying the `*` wildcard. The value of the header + // `Access-Control-Allow-Origin` is same as the `Origin` header provided by + // the client. 
+ // + // Support: Extended + // +listType=set + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="AllowOrigins cannot contain '*' alongside other origins",rule="!('*' in self && self.size() > 1)" + // +optional + AllowOrigins []CORSOrigin `json:"allowOrigins,omitempty"` + + // AllowCredentials indicates whether the actual cross-origin request allows + // to include credentials. + // + // When set to true, the gateway will include the `Access-Control-Allow-Credentials` + // response header with value true (case-sensitive). + // + // When set to false or omitted the gateway will omit the header + // `Access-Control-Allow-Credentials` entirely (this is the standard CORS + // behavior). + // + // Support: Extended + // + // +optional + AllowCredentials *bool `json:"allowCredentials,omitempty"` + + // AllowMethods indicates which HTTP methods are supported for accessing the + // requested resource. + // + // Valid values are any method defined by RFC9110, along with the special + // value `*`, which represents all HTTP methods are allowed. + // + // Method names are case sensitive, so these values are also case-sensitive. + // (See https://www.rfc-editor.org/rfc/rfc2616#section-5.1.1) + // + // Multiple method names in the value of the `Access-Control-Allow-Methods` + // response header are separated by a comma (","). + // + // A CORS-safelisted method is a method that is `GET`, `HEAD`, or `POST`. + // (See https://fetch.spec.whatwg.org/#cors-safelisted-method) The + // CORS-safelisted methods are always allowed, regardless of whether they + // are specified in the `AllowMethods` field. + // + // When the `AllowMethods` field is configured with one or more methods, the + // gateway must return the `Access-Control-Allow-Methods` response header + // which value is present in the `AllowMethods` field. 
+ // + // If the HTTP method of the `Access-Control-Request-Method` request header + // is not included in the list of methods specified by the response header + // `Access-Control-Allow-Methods`, it will present an error on the client + // side. + // + // The `Access-Control-Allow-Methods` response header can only use `*` + // wildcard as value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and `AllowMethods` field + // specified with the `*` wildcard, the gateway must specify one HTTP method + // in the value of the Access-Control-Allow-Methods response header. The + // value of the header `Access-Control-Allow-Methods` is same as the + // `Access-Control-Request-Method` header provided by the client. If the + // header `Access-Control-Request-Method` is not included in the request, + // the gateway will omit the `Access-Control-Allow-Methods` response header, + // instead of specifying the `*` wildcard. A Gateway implementation may + // choose to add implementation-specific default methods. + // + // Support: Extended + // + // +listType=set + // +kubebuilder:validation:MaxItems=9 + // +kubebuilder:validation:XValidation:message="AllowMethods cannot contain '*' alongside other methods",rule="!('*' in self && self.size() > 1)" + // +optional + AllowMethods []HTTPMethodWithWildcard `json:"allowMethods,omitempty"` + + // AllowHeaders indicates which HTTP request headers are supported for + // accessing the requested resource. + // + // Header names are not case sensitive. + // + // Multiple header names in the value of the `Access-Control-Allow-Headers` + // response header are separated by a comma (","). + // + // When the `AllowHeaders` field is configured with one or more headers, the + // gateway must return the `Access-Control-Allow-Headers` response header + // which value is present in the `AllowHeaders` field. 
+	//
+	// If any header name in the `Access-Control-Request-Headers` request header
+	// is not included in the list of header names specified by the response
+	// header `Access-Control-Allow-Headers`, it will present an error on the
+	// client side.
+	//
+	// If any header name in the `Access-Control-Allow-Headers` response header
+	// is not recognized by the client, it will also cause an error on the
+	// client side.
+	//
+	// A wildcard indicates that the requests with all HTTP headers are allowed.
+	// The `Access-Control-Allow-Headers` response header can only use `*`
+	// wildcard as value when the `AllowCredentials` field is false or omitted.
+	//
+	// When the `AllowCredentials` field is true and `AllowHeaders` field
+	// specified with the `*` wildcard, the gateway must specify one or more
+	// HTTP headers in the value of the `Access-Control-Allow-Headers` response
+	// header. The value of the header `Access-Control-Allow-Headers` is same as
+	// the `Access-Control-Request-Headers` header provided by the client. If
+	// the header `Access-Control-Request-Headers` is not included in the
+	// request, the gateway will omit the `Access-Control-Allow-Headers`
+	// response header, instead of specifying the `*` wildcard. A Gateway
+	// implementation may choose to add implementation-specific default headers.
+	//
+	// Support: Extended
+	//
+	// +listType=set
+	// +kubebuilder:validation:MaxItems=64
+	// +optional
+	AllowHeaders []HTTPHeaderName `json:"allowHeaders,omitempty"`
+
+	// ExposeHeaders indicates which HTTP response headers can be exposed
+	// to client-side scripts in response to a cross-origin request.
+	//
+	// A CORS-safelisted response header is an HTTP header in a CORS response
+	// that is considered safe to expose to the client scripts.
+	// The CORS-safelisted response headers include the following headers:
+	// `Cache-Control`
+	// `Content-Language`
+	// `Content-Length`
+	// `Content-Type`
+	// `Expires`
+	// `Last-Modified`
+	// `Pragma`
+	// (See https://fetch.spec.whatwg.org/#cors-safelisted-response-header-name)
+	// The CORS-safelisted response headers are exposed to client by default.
+	//
+	// When an HTTP header name is specified using the `ExposeHeaders` field,
+	// this additional header will be exposed as part of the response to the
+	// client.
+	//
+	// Header names are not case sensitive.
+	//
+	// Multiple header names in the value of the `Access-Control-Expose-Headers`
+	// response header are separated by a comma (",").
+	//
+	// A wildcard indicates that the responses with all HTTP headers are exposed
+	// to clients. The `Access-Control-Expose-Headers` response header can only
+	// use `*` wildcard as value when the `AllowCredentials` field is false or omitted.
+	//
+	// Support: Extended
+	//
+	// +optional
+	// +listType=set
+	// +kubebuilder:validation:MaxItems=64
+	ExposeHeaders []HTTPHeaderName `json:"exposeHeaders,omitempty"`
+
+	// MaxAge indicates the duration (in seconds) for the client to cache the
+	// results of a "preflight" request.
+	//
+	// The information provided by the `Access-Control-Allow-Methods` and
+	// `Access-Control-Allow-Headers` response headers can be cached by the
+	// client until the time specified by `Access-Control-Max-Age` elapses.
+	//
+	// The default value of `Access-Control-Max-Age` response header is 5
+	// (seconds).
+	//
+	// +optional
+	// +kubebuilder:default=5
+	// +kubebuilder:validation:Minimum=1
+	MaxAge int32 `json:"maxAge,omitempty"`
+}
+
+// HTTPRouteExternalAuthProtocol specifies what protocol should be used
+// for communicating with an external authorization server.
+//
+// Valid values are supplied as constants below.
+type HTTPRouteExternalAuthProtocol string
+
+const (
+	HTTPRouteExternalAuthGRPCProtocol HTTPRouteExternalAuthProtocol = "GRPC"
+	HTTPRouteExternalAuthHTTPProtocol HTTPRouteExternalAuthProtocol = "HTTP"
+)
+
+// HTTPExternalAuthFilter defines a filter that modifies requests by sending
+// request details to an external authorization server.
+//
+// Support: Extended
+// Feature Name: HTTPRouteExternalAuth
+// +kubebuilder:validation:XValidation:message="grpc must be specified when protocol is set to 'GRPC'",rule="self.protocol == 'GRPC' ? has(self.grpc) : true"
+// +kubebuilder:validation:XValidation:message="protocol must be 'GRPC' when grpc is set",rule="has(self.grpc) ? self.protocol == 'GRPC' : true"
+// +kubebuilder:validation:XValidation:message="http must be specified when protocol is set to 'HTTP'",rule="self.protocol == 'HTTP' ? has(self.http) : true"
+// +kubebuilder:validation:XValidation:message="protocol must be 'HTTP' when http is set",rule="has(self.http) ? self.protocol == 'HTTP' : true"
+type HTTPExternalAuthFilter struct {
+	// ExternalAuthProtocol describes which protocol to use when communicating with an
+	// ext_authz authorization server.
+	//
+	// When this is set to GRPC, each backend must use the Envoy ext_authz protocol
+	// on the port specified in `backendRefs`. Requests and responses are defined
+	// in the protobufs explained at:
+	// https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto
+	//
+	// When this is set to HTTP, each backend must respond with a `200` status
+	// code on a successful authorization. Any other code is considered
+	// an authorization failure.
+ // + // Feature Names: + // GRPC Support - HTTPRouteExternalAuthGRPC + // HTTP Support - HTTPRouteExternalAuthHTTP + // + // +unionDiscriminator + // +required + // +kubebuilder:validation:Enum=HTTP;GRPC + ExternalAuthProtocol HTTPRouteExternalAuthProtocol `json:"protocol,omitempty"` + + // BackendRef is a reference to a backend to send authorization + // requests to. + // + // The backend must speak the selected protocol (GRPC or HTTP) on the + // referenced port. + // + // If the backend service requires TLS, use BackendTLSPolicy to tell the + // implementation to supply the TLS details to be used to connect to that + // backend. + // + // +required + BackendRef BackendObjectReference `json:"backendRef,omitempty"` + + // GRPCAuthConfig contains configuration for communication with ext_authz + // protocol-speaking backends. + // + // If unset, implementations must assume the default behavior for each + // included field is intended. + // + // +optional + GRPCAuthConfig *GRPCAuthConfig `json:"grpc,omitempty"` + + // HTTPAuthConfig contains configuration for communication with HTTP-speaking + // backends. + // + // If unset, implementations must assume the default behavior for each + // included field is intended. + // + // +optional + HTTPAuthConfig *HTTPAuthConfig `json:"http,omitempty"` + + // ForwardBody controls if requests to the authorization server should include + // the body of the client request; and if so, how big that body is allowed + // to be. + // + // It is expected that implementations will buffer the request body up to + // `forwardBody.maxSize` bytes. Bodies over that size must be rejected with a + // 4xx series error (413 or 403 are common examples), and fail processing + // of the filter. + // + // If unset, or `forwardBody.maxSize` is set to `0`, then the body will not + // be forwarded. 
+ // + // Feature Name: HTTPRouteExternalAuthForwardBody + // + // + // +optional + ForwardBody *ForwardBodyConfig `json:"forwardBody,omitempty"` +} + +// GRPCAuthConfig contains configuration for communication with Auth server +// backends that speak Envoy's ext_authz gRPC protocol. +// +// Requests and responses are defined in the protobufs explained at: +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto +type GRPCAuthConfig struct { + // AllowedRequestHeaders specifies what headers from the client request + // will be sent to the authorization server. + // + // If this list is empty, then all headers must be sent. + // + // If the list has entries, only those entries must be sent. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxLength=64 + AllowedRequestHeaders []string `json:"allowedHeaders,omitempty"` +} + +// HTTPAuthConfig contains configuration for communication with HTTP-speaking +// backends. +type HTTPAuthConfig struct { + // Path sets the prefix that paths from the client request will have added + // when forwarded to the authorization server. + // + // When empty or unspecified, no prefix is added. + // + // Valid values are the same as the "value" regex for path values in the `match` + // stanza, and the validation regex will screen out invalid paths in the same way. + // Even with the validation, implementations MUST sanitize this input before using it + // directly. + // + // +optional + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:Pattern="^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$" + Path string `json:"path,omitempty"` + + // AllowedRequestHeaders specifies what additional headers from the client request + // will be sent to the authorization server. 
+ // + // The following headers must always be sent to the authorization server, + // regardless of this setting: + // + // * `Host` + // * `Method` + // * `Path` + // * `Content-Length` + // * `Authorization` + // + // If this list is empty, then only those headers must be sent. + // + // Note that `Content-Length` has a special behavior, in that the length + // sent must be correct for the actual request to the external authorization + // server - that is, it must reflect the actual number of bytes sent in the + // body of the request to the authorization server. + // + // So if the `forwardBody` stanza is unset, or `forwardBody.maxSize` is set + // to `0`, then `Content-Length` must be `0`. If `forwardBody.maxSize` is set + // to anything other than `0`, then the `Content-Length` of the authorization + // request must be set to the actual number of bytes forwarded. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxLength=64 + AllowedRequestHeaders []string `json:"allowedHeaders,omitempty"` + + // AllowedResponseHeaders specifies what headers from the authorization response + // will be copied into the request to the backend. + // + // If this list is empty, then all headers from the authorization server + // except Authority or Host must be copied. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxLength=64 + AllowedResponseHeaders []string `json:"allowedResponseHeaders,omitempty"` +} + +// ForwardBody configures if requests to the authorization server should include +// the body of the client request; and if so, how big that body is allowed +// to be. +// +// If empty or unset, do not forward the body. +type ForwardBodyConfig struct { + // MaxSize specifies how large in bytes the largest body that will be buffered + // and sent to the authorization server. If the body size is larger than + // `maxSize`, then the body sent to the authorization server must be + // truncated to `maxSize` bytes. 
+ // + // Experimental note: This behavior needs to be checked against + // various dataplanes; it may need to be changed. + // See https://github.com/kubernetes-sigs/gateway-api/pull/4001#discussion_r2291405746 + // for more. + // + // If 0, the body will not be sent to the authorization server. + // +optional + MaxSize uint16 `json:"maxSize,omitempty"` +} + +// HTTPBackendRef defines how a HTTPRoute forwards a HTTP request. +// +// Note that when a namespace different than the local namespace is specified, a +// ReferenceGrant object is required in the referent namespace to allow that +// namespace's owner to accept the reference. See the ReferenceGrant +// documentation for details. +// +// +// +// When the BackendRef points to a Kubernetes Service, implementations SHOULD +// honor the appProtocol field if it is set for the target Service Port. +// +// Implementations supporting appProtocol SHOULD recognize the Kubernetes +// Standard Application Protocols defined in KEP-3726. +// +// If a Service appProtocol isn't specified, an implementation MAY infer the +// backend protocol through its own means. Implementations MAY infer the +// protocol from the Route type referring to the backend Service. +// +// If a Route is not able to send traffic to the backend using the specified +// protocol then the backend is considered invalid. Implementations MUST set the +// "ResolvedRefs" condition to "False" with the "UnsupportedProtocol" reason. +// +// +type HTTPBackendRef struct { + // BackendRef is a reference to a backend to forward matched requests to. + // + // A BackendRef can be invalid for the following reasons. In all cases, the + // implementation MUST ensure the `ResolvedRefs` Condition on the Route + // is set to `status: False`, with a Reason and Message that indicate + // what is the cause of the error. + // + // A BackendRef is invalid if: + // + // * It refers to an unknown or unsupported kind of resource. 
In this + // case, the Reason must be set to `InvalidKind` and Message of the + // Condition must explain which kind of resource is unknown or unsupported. + // + // * It refers to a resource that does not exist. In this case, the Reason must + // be set to `BackendNotFound` and the Message of the Condition must explain + // which resource does not exist. + // + // * It refers a resource in another namespace when the reference has not been + // explicitly allowed by a ReferenceGrant (or equivalent concept). In this + // case, the Reason must be set to `RefNotPermitted` and the Message of the + // Condition must explain which cross-namespace reference is not allowed. + // + // * It refers to a Kubernetes Service that has an incompatible appProtocol + // for the given Route type + // + // * The BackendTLSPolicy object is installed in the cluster, a BackendTLSPolicy + // is present that refers to the Service, and the implementation is unable + // to meet the requirement. At the time of writing, BackendTLSPolicy is + // experimental, but once it becomes standard, this will become a MUST + // requirement. + // + // Support: Core for Kubernetes Service + // + // Support: Implementation-specific for any other resource + // + // Support for weight: Core + // + // Support for Kubernetes Service appProtocol: Extended + // + // Support for BackendTLSPolicy: Experimental and ImplementationSpecific + // + // +optional + BackendRef `json:",inline"` + + // Filters defined at this level should be executed if and only if the + // request is being forwarded to the backend defined here. + // + // Support: Implementation-specific (For broader support of filters, use the + // Filters field in HTTPRouteRule.) 
+ // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" + // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" + // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" + // +kubebuilder:validation:XValidation:message="RequestRedirect filter cannot be repeated",rule="self.filter(f, f.type == 'RequestRedirect').size() <= 1" + // +kubebuilder:validation:XValidation:message="URLRewrite filter cannot be repeated",rule="self.filter(f, f.type == 'URLRewrite').size() <= 1" + Filters []HTTPRouteFilter `json:"filters,omitempty"` +} + +// HTTPRouteStatus defines the observed state of HTTPRoute. +type HTTPRouteStatus struct { + RouteStatus `json:",inline"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go new file mode 100644 index 00000000..414e39b9 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go @@ -0,0 +1,190 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// LocalObjectReference identifies an API object within the namespace of the +// referrer. +// The API object must be valid in the cluster; the Group and Kind must +// be registered in the cluster for this reference to be valid. +// +// References to objects with invalid Group and Kind are not valid, and must +// be rejected by the implementation, with appropriate Conditions set +// on the containing object. +type LocalObjectReference struct { + // Group is the group of the referent. For example, "gateway.networking.k8s.io". + // When unspecified or empty string, core API group is inferred. + // +required + Group Group `json:"group"` + + // Kind is kind of the referent. For example "HTTPRoute" or "Service". + // +required + Kind Kind `json:"kind"` + + // Name is the name of the referent. + // +required + Name ObjectName `json:"name"` +} + +// SecretObjectReference identifies an API object including its namespace, +// defaulting to Secret. +// +// The API object must be valid in the cluster; the Group and Kind must +// be registered in the cluster for this reference to be valid. +// +// References to objects with invalid Group and Kind are not valid, and must +// be rejected by the implementation, with appropriate Conditions set +// on the containing object. +type SecretObjectReference struct { + // Group is the group of the referent. For example, "gateway.networking.k8s.io". + // When unspecified or empty string, core API group is inferred. + // + // +optional + // +kubebuilder:default="" + Group *Group `json:"group"` + + // Kind is kind of the referent. For example "Secret". + // + // +optional + // +kubebuilder:default=Secret + Kind *Kind `json:"kind"` + + // Name is the name of the referent. + // +required + Name ObjectName `json:"name"` + + // Namespace is the namespace of the referenced object. 
When unspecified, the local + // namespace is inferred. + // + // Note that when a namespace different than the local namespace is specified, + // a ReferenceGrant object is required in the referent namespace to allow that + // namespace's owner to accept the reference. See the ReferenceGrant + // documentation for details. + // + // Support: Core + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` +} + +// BackendObjectReference defines how an ObjectReference that is +// specific to BackendRef. It includes a few additional fields and features +// than a regular ObjectReference. +// +// Note that when a namespace different than the local namespace is specified, a +// ReferenceGrant object is required in the referent namespace to allow that +// namespace's owner to accept the reference. See the ReferenceGrant +// documentation for details. +// +// The API object must be valid in the cluster; the Group and Kind must +// be registered in the cluster for this reference to be valid. +// +// References to objects with invalid Group and Kind are not valid, and must +// be rejected by the implementation, with appropriate Conditions set +// on the containing object. +// +// +kubebuilder:validation:XValidation:message="Must have port for Service reference",rule="(size(self.group) == 0 && self.kind == 'Service') ? has(self.port) : true" +type BackendObjectReference struct { + // Group is the group of the referent. For example, "gateway.networking.k8s.io". + // When unspecified or empty string, core API group is inferred. + // + // +optional + // +kubebuilder:default="" + Group *Group `json:"group,omitempty"` + + // Kind is the Kubernetes resource kind of the referent. For example + // "Service". + // + // Defaults to "Service" when not specified. + // + // ExternalName services can refer to CNAME DNS records that may live + // outside of the cluster and as such are difficult to reason about in + // terms of conformance. 
They also may not be safe to forward to (see + // CVE-2021-25740 for more information). Implementations SHOULD NOT + // support ExternalName Services. + // + // Support: Core (Services with a type other than ExternalName) + // + // Support: Implementation-specific (Services with type ExternalName) + // + // +optional + // +kubebuilder:default=Service + Kind *Kind `json:"kind,omitempty"` + + // Name is the name of the referent. + // +required + Name ObjectName `json:"name"` + + // Namespace is the namespace of the backend. When unspecified, the local + // namespace is inferred. + // + // Note that when a namespace different than the local namespace is specified, + // a ReferenceGrant object is required in the referent namespace to allow that + // namespace's owner to accept the reference. See the ReferenceGrant + // documentation for details. + // + // Support: Core + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` + + // Port specifies the destination port number to use for this resource. + // Port is required when the referent is a Kubernetes Service. In this + // case, the port number is the service port number, not the target port. + // For other resources, destination port might be derived from the referent + // resource or this field. + // + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port *PortNumber `json:"port,omitempty"` +} + +// ObjectReference identifies an API object including its namespace. +// +// The API object must be valid in the cluster; the Group and Kind must +// be registered in the cluster for this reference to be valid. +// +// References to objects with invalid Group and Kind are not valid, and must +// be rejected by the implementation, with appropriate Conditions set +// on the containing object. +type ObjectReference struct { + // Group is the group of the referent. For example, "gateway.networking.k8s.io". 
+ // When set to the empty string, core API group is inferred. + // +required + Group Group `json:"group"` + + // Kind is kind of the referent. For example "ConfigMap" or "Service". + // +required + Kind Kind `json:"kind"` + + // Name is the name of the referent. + // +required + Name ObjectName `json:"name"` + + // Namespace is the namespace of the referenced object. When unspecified, the local + // namespace is inferred. + // + // Note that when a namespace different than the local namespace is specified, + // a ReferenceGrant object is required in the referent namespace to allow that + // namespace's owner to accept the reference. See the ReferenceGrant + // documentation for details. + // + // Support: Core + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go new file mode 100644 index 00000000..552db9bf --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go @@ -0,0 +1,279 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // PolicyLabelKey is the label whose presence identifies a CRD that the + // Gateway API Policy attachment model. The value of the label SHOULD be one + // of the following: + // - A label value of "Inherited" indicates that this Policy is inheritable. 
+	// An example of inheritable policy is one which if applied at the Gateway
+	// level would affect all attached HTTPRoutes and their respective
+	// Backends.
+	// - A label value of "Direct" indicates that the policy only affects the
+	// resource to which it is attached and does not affect its sub resources.
+	PolicyLabelKey = "gateway.networking.k8s.io/policy"
+)
+
+// LocalPolicyTargetReference identifies an API object to apply a direct or
+// inherited policy to. This should be used as part of Policy resources
+// that can target Gateway API resources. For more information on how this
+// policy attachment model works, and a sample Policy resource, refer to
+// the policy attachment documentation for Gateway API.
+type LocalPolicyTargetReference struct {
+	// Group is the group of the target resource.
+	// +required
+	Group Group `json:"group"`
+
+	// Kind is kind of the target resource.
+	// +required
+	Kind Kind `json:"kind"`
+
+	// Name is the name of the target resource.
+	// +required
+	Name ObjectName `json:"name"`
+}
+
+// NamespacedPolicyTargetReference identifies an API object to apply a direct or
+// inherited policy to, potentially in a different namespace. This should only
+// be used as part of Policy resources that need to be able to target resources
+// in different namespaces. For more information on how this policy attachment
+// model works, and a sample Policy resource, refer to the policy attachment
+// documentation for Gateway API.
+type NamespacedPolicyTargetReference struct {
+	// Group is the group of the target resource.
+	// +required
+	Group Group `json:"group"`
+
+	// Kind is kind of the target resource.
+	// +required
+	Kind Kind `json:"kind"`
+
+	// Name is the name of the target resource.
+	// +required
+	Name ObjectName `json:"name"`
+
+	// Namespace is the namespace of the referent. When unspecified, the local
+	// namespace is inferred.
Even when policy targets a resource in a different + // namespace, it MUST only apply to traffic originating from the same + // namespace as the policy. + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` +} + +// LocalPolicyTargetReferenceWithSectionName identifies an API object to apply a +// direct policy to. This should be used as part of Policy resources that can +// target single resources. For more information on how this policy attachment +// mode works, and a sample Policy resource, refer to the policy attachment +// documentation for Gateway API. +// +// Note: This should only be used for direct policy attachment when references +// to SectionName are actually needed. In all other cases, +// LocalPolicyTargetReference should be used. +type LocalPolicyTargetReferenceWithSectionName struct { + LocalPolicyTargetReference `json:",inline"` + + // SectionName is the name of a section within the target resource. When + // unspecified, this targetRef targets the entire resource. In the following + // resources, SectionName is interpreted as the following: + // + // * Gateway: Listener name + // * HTTPRoute: HTTPRouteRule name + // * Service: Port name + // + // If a SectionName is specified, but does not exist on the targeted object, + // the Policy must fail to attach, and the policy implementation should record + // a `ResolvedRefs` or similar Condition in the Policy's status. + // + // +optional + SectionName *SectionName `json:"sectionName,omitempty"` +} + +// PolicyConditionType is a type of condition for a policy. This type should be +// used with a Policy resource Status.Conditions field. +type PolicyConditionType string + +// PolicyConditionReason is a reason for a policy condition. +type PolicyConditionReason string + +const ( + // PolicyConditionAccepted indicates whether the policy has been accepted or + // rejected by a targeted resource, and why. 
+ // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "Conflicted" + // * "Invalid" + // * "TargetNotFound" + // + PolicyConditionAccepted PolicyConditionType = "Accepted" + + // PolicyReasonAccepted is used with the "Accepted" condition when the policy + // has been accepted by the targeted resource. + PolicyReasonAccepted PolicyConditionReason = "Accepted" + + // PolicyReasonConflicted is used with the "Accepted" condition when the + // policy has not been accepted by a targeted resource because there is + // another policy that targets the same resource and a merge is not possible. + PolicyReasonConflicted PolicyConditionReason = "Conflicted" + + // PolicyReasonInvalid is used with the "Accepted" condition when the policy + // is syntactically or semantically invalid. + PolicyReasonInvalid PolicyConditionReason = "Invalid" + + // PolicyReasonTargetNotFound is used with the "Accepted" condition when the + // policy is attached to an invalid target resource. + PolicyReasonTargetNotFound PolicyConditionReason = "TargetNotFound" +) + +// PolicyAncestorStatus describes the status of a route with respect to an +// associated Ancestor. +// +// Ancestors refer to objects that are either the Target of a policy or above it +// in terms of object hierarchy. For example, if a policy targets a Service, the +// Policy's Ancestors are, in order, the Service, the HTTPRoute, the Gateway, and +// the GatewayClass. Almost always, in this hierarchy, the Gateway will be the most +// useful object to place Policy status on, so we recommend that implementations +// SHOULD use Gateway as the PolicyAncestorStatus object unless the designers +// have a _very_ good reason otherwise. +// +// In the context of policy attachment, the Ancestor is used to distinguish which +// resource results in a distinct application of this policy. 
For example, if a policy +// targets a Service, it may have a distinct result per attached Gateway. +// +// Policies targeting the same resource may have different effects depending on the +// ancestors of those resources. For example, different Gateways targeting the same +// Service may have different capabilities, especially if they have different underlying +// implementations. +// +// For example, in BackendTLSPolicy, the Policy attaches to a Service that is +// used as a backend in a HTTPRoute that is itself attached to a Gateway. +// In this case, the relevant object for status is the Gateway, and that is the +// ancestor object referred to in this status. +// +// Note that a parent is also an ancestor, so for objects where the parent is the +// relevant object for status, this struct SHOULD still be used. +// +// This struct is intended to be used in a slice that's effectively a map, +// with a composite key made up of the AncestorRef and the ControllerName. +type PolicyAncestorStatus struct { + // AncestorRef corresponds with a ParentRef in the spec that this + // PolicyAncestorStatus struct describes the status of. + // +required + AncestorRef ParentReference `json:"ancestorRef"` + + // ControllerName is a domain/path string that indicates the name of the + // controller that wrote this status. This corresponds with the + // controllerName field on GatewayClass. + // + // Example: "example.net/gateway-controller". + // + // The format of this field is DOMAIN "/" PATH, where DOMAIN and PATH are + // valid Kubernetes names + // (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + // + // Controllers MUST populate this field when writing status. Controllers should ensure that + // entries to status populated with their ControllerName are cleaned up when they are no + // longer necessary. 
+ // +required + ControllerName GatewayController `json:"controllerName"` + + // Conditions describes the status of the Policy with respect to the given Ancestor. + // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) 
+ // + // + // + // +required + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// PolicyStatus defines the common attributes that all Policies should include within +// their status. +type PolicyStatus struct { + // Ancestors is a list of ancestor resources (usually Gateways) that are + // associated with the policy, and the status of the policy with respect to + // each ancestor. When this policy attaches to a parent, the controller that + // manages the parent and the ancestors MUST add an entry to this list when + // the controller first sees the policy and SHOULD update the entry as + // appropriate when the relevant ancestor is modified. + // + // Note that choosing the relevant ancestor is left to the Policy designers; + // an important part of Policy design is designing the right object level at + // which to namespace this status. + // + // Note also that implementations MUST ONLY populate ancestor status for + // the Ancestor resources they are responsible for. Implementations MUST + // use the ControllerName field to uniquely identify the entries in this list + // that they are responsible for. + // + // Note that to achieve this, the list of PolicyAncestorStatus structs + // MUST be treated as a map with a composite key, made up of the AncestorRef + // and ControllerName fields combined. + // + // A maximum of 16 ancestors will be represented in this list. An empty list + // means the Policy is not relevant for any ancestors. + // + // If this slice is full, implementations MUST NOT add further entries. + // Instead they MUST consider the policy unimplementable and signal that + // on any related resources such as the ancestor that would be referenced + // here. 
For example, if this list was full on BackendTLSPolicy, no + // additional Gateways would be able to reference the Service targeted by + // the BackendTLSPolicy. + // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + Ancestors []PolicyAncestorStatus `json:"ancestors"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go new file mode 100644 index 00000000..eb880683 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go @@ -0,0 +1,1030 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ParentReference identifies an API object (usually a Gateway) that can be considered +// a parent of this resource (usually a route). There are two kinds of parent resources +// with "Core" support: +// +// * Gateway (Gateway conformance profile) +// * Service (Mesh conformance profile, ClusterIP Services only) +// +// This API may be extended in the future to support additional kinds of parent +// resources. +// +// The API object must be valid in the cluster; the Group and Kind must +// be registered in the cluster for this reference to be valid. +type ParentReference struct { + // Group is the group of the referent. + // When unspecified, "gateway.networking.k8s.io" is inferred. 
+ // To set the core API group (such as for a "Service" kind referent), + // Group must be explicitly set to "" (empty string). + // + // Support: Core + // + // +kubebuilder:default=gateway.networking.k8s.io + // +optional + Group *Group `json:"group,omitempty"` + + // Kind is kind of the referent. + // + // There are two kinds of parent resources with "Core" support: + // + // * Gateway (Gateway conformance profile) + // * Service (Mesh conformance profile, ClusterIP Services only) + // + // Support for other resources is Implementation-Specific. + // + // +kubebuilder:default=Gateway + // +optional + Kind *Kind `json:"kind,omitempty"` + + // Namespace is the namespace of the referent. When unspecified, this refers + // to the local namespace of the Route. + // + // Note that there are specific rules for ParentRefs which cross namespace + // boundaries. Cross-namespace references are only valid if they are explicitly + // allowed by something in the namespace they are referring to. For example: + // Gateway has the AllowedRoutes field, and ReferenceGrant provides a + // generic way to enable any other kind of cross-namespace reference. + // + // + // ParentRefs from a Route to a Service in the same namespace are "producer" + // routes, which apply default routing rules to inbound connections from + // any namespace to the Service. + // + // ParentRefs from a Route to a Service in a different namespace are + // "consumer" routes, and these routing rules are only applied to outbound + // connections originating from the same namespace as the Route, for which + // the intended destination of the connections are a Service targeted as a + // ParentRef of the Route. + // + // + // Support: Core + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` + + // Name is the name of the referent. + // + // Support: Core + // +required + Name ObjectName `json:"name"` + + // SectionName is the name of a section within the target resource. 
In the + // following resources, SectionName is interpreted as the following: + // + // * Gateway: Listener name. When both Port (experimental) and SectionName + // are specified, the name and port of the selected listener must match + // both specified values. + // * Service: Port name. When both Port (experimental) and SectionName + // are specified, the name and port of the selected listener must match + // both specified values. + // + // Implementations MAY choose to support attaching Routes to other resources. + // If that is the case, they MUST clearly document how SectionName is + // interpreted. + // + // When unspecified (empty string), this will reference the entire resource. + // For the purpose of status, an attachment is considered successful if at + // least one section in the parent resource accepts it. For example, Gateway + // listeners can restrict which Routes can attach to them by Route kind, + // namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from + // the referencing Route, the Route MUST be considered successfully + // attached. If no Gateway listeners accept attachment from this Route, the + // Route MUST be considered detached from the Gateway. + // + // Support: Core + // + // +optional + SectionName *SectionName `json:"sectionName,omitempty"` + + // Port is the network port this Route targets. It can be interpreted + // differently based on the type of parent resource. + // + // When the parent resource is a Gateway, this targets all listeners + // listening on the specified port that also support this kind of Route(and + // select this Route). It's not recommended to set `Port` unless the + // networking behaviors specified in a Route must apply to a specific port + // as opposed to a listener(s) whose port(s) may be changed. When both Port + // and SectionName are specified, the name and port of the selected listener + // must match both specified values. 
+ // + // + // When the parent resource is a Service, this targets a specific port in the + // Service spec. When both Port (experimental) and SectionName are specified, + // the name and port of the selected port must match both specified values. + // + // + // Implementations MAY choose to support other parent resources. + // Implementations supporting other types of parent resources MUST clearly + // document how/if Port is interpreted. + // + // For the purpose of status, an attachment is considered successful as + // long as the parent resource accepts it partially. For example, Gateway + // listeners can restrict which Routes can attach to them by Route kind, + // namespace, or hostname. If 1 of 2 Gateway listeners accept attachment + // from the referencing Route, the Route MUST be considered successfully + // attached. If no Gateway listeners accept attachment from this Route, + // the Route MUST be considered detached from the Gateway. + // + // Support: Extended + // + // +optional + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port *PortNumber `json:"port,omitempty"` +} + +// GatewayDefaultScope defines the set of default scopes that a Gateway +// can claim, for use in any Route type. At present the only supported +// scopes are "All" and "None". "None" is a special scope which +// explicitly means that the Route MUST NOT attached to any default +// Gateway. +// +// +kubebuilder:validation:Enum=All;None +type GatewayDefaultScope string + +const ( + // GatewayDefaultScopeAll indicates that a Gateway can claim absolutely + // any Route asking for a default Gateway. + GatewayDefaultScopeAll GatewayDefaultScope = "All" + + // GatewayDefaultScopeNone indicates that a Gateway MUST NOT claim + // any Route asking for a default Gateway. + GatewayDefaultScopeNone GatewayDefaultScope = "None" +) + +// CommonRouteSpec defines the common attributes that all Routes MUST include +// within their spec. 
+type CommonRouteSpec struct { + // ParentRefs references the resources (usually Gateways) that a Route wants + // to be attached to. Note that the referenced parent resource needs to + // allow this for the attachment to be complete. For Gateways, that means + // the Gateway needs to allow attachment from Routes of this kind and + // namespace. For Services, that means the Service must either be in the same + // namespace for a "producer" route, or the mesh implementation must support + // and allow "consumer" routes for the referenced Service. ReferenceGrant is + // not applicable for governing ParentRefs to Services - it is not possible to + // create a "producer" route for a Service in a different namespace from the + // Route. + // + // There are two kinds of parent resources with "Core" support: + // + // * Gateway (Gateway conformance profile) + // * Service (Mesh conformance profile, ClusterIP Services only) + // + // This API may be extended in the future to support additional kinds of parent + // resources. + // + // ParentRefs must be _distinct_. This means either that: + // + // * They select different objects. If this is the case, then parentRef + // entries are distinct. In terms of fields, this means that the + // multi-part key defined by `group`, `kind`, `namespace`, and `name` must + // be unique across all parentRef entries in the Route. + // * They do not select different objects, but for each optional field used, + // each ParentRef that selects the same object must set the same set of + // optional fields to different values. If one ParentRef sets a + // combination of optional fields, all must set the same combination. + // + // Some examples: + // + // * If one ParentRef sets `sectionName`, all ParentRefs referencing the + // same object must also set `sectionName`. + // * If one ParentRef sets `port`, all ParentRefs referencing the same + // object must also set `port`. 
+ // * If one ParentRef sets `sectionName` and `port`, all ParentRefs + // referencing the same object must also set `sectionName` and `port`. + // + // It is possible to separately reference multiple distinct objects that may + // be collapsed by an implementation. For example, some implementations may + // choose to merge compatible Gateway Listeners together. If that is the + // case, the list of routes attached to those resources should also be + // merged. + // + // Note that for ParentRefs that cross namespace boundaries, there are specific + // rules. Cross-namespace references are only valid if they are explicitly + // allowed by something in the namespace they are referring to. For example, + // Gateway has the AllowedRoutes field, and ReferenceGrant provides a + // generic way to enable other kinds of cross-namespace reference. + // + // + // ParentRefs from a Route to a Service in the same namespace are "producer" + // routes, which apply default routing rules to inbound connections from + // any namespace to the Service. + // + // ParentRefs from a Route to a Service in a different namespace are + // "consumer" routes, and these routing rules are only applied to outbound + // connections originating from the same namespace as the Route, for which + // the intended destination of the connections are a Service targeted as a + // ParentRef of the Route. + // + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // + // + // + // + ParentRefs []ParentReference `json:"parentRefs,omitempty"` + + // UseDefaultGateways indicates the default Gateway scope to use for this + // Route. If unset (the default) or set to None, the Route will not be + // attached to any default Gateway; if set, it will be attached to any + // default Gateway supporting the named scope, subject to the usual rules + // about which Routes a Gateway is allowed to claim. + // + // Think carefully before using this functionality! 
The set of default + // Gateways supporting the requested scope can change over time without + // any notice to the Route author, and in many situations it will not be + // appropriate to request a default Gateway for a given Route -- for + // example, a Route with specific security requirements should almost + // certainly not use a default Gateway. + // + // +optional + // + UseDefaultGateways GatewayDefaultScope `json:"useDefaultGateways,omitempty"` +} + +// PortNumber defines a network port. +type PortNumber = int32 + +// BackendRef defines how a Route should forward a request to a Kubernetes +// resource. +// +// Note that when a namespace different than the local namespace is specified, a +// ReferenceGrant object is required in the referent namespace to allow that +// namespace's owner to accept the reference. See the ReferenceGrant +// documentation for details. +// +// +// +// When the BackendRef points to a Kubernetes Service, implementations SHOULD +// honor the appProtocol field if it is set for the target Service Port. +// +// Implementations supporting appProtocol SHOULD recognize the Kubernetes +// Standard Application Protocols defined in KEP-3726. +// +// If a Service appProtocol isn't specified, an implementation MAY infer the +// backend protocol through its own means. Implementations MAY infer the +// protocol from the Route type referring to the backend Service. +// +// If a Route is not able to send traffic to the backend using the specified +// protocol then the backend is considered invalid. Implementations MUST set the +// "ResolvedRefs" condition to "False" with the "UnsupportedProtocol" reason. +// +// +// +// Note that when the BackendTLSPolicy object is enabled by the implementation, +// there are some extra rules about validity to consider here. See the fields +// where this struct is used for more information about the exact behavior. +type BackendRef struct { + // BackendObjectReference references a Kubernetes object. 
+ BackendObjectReference `json:",inline"` + + // Weight specifies the proportion of requests forwarded to the referenced + // backend. This is computed as weight/(sum of all weights in this + // BackendRefs list). For non-zero values, there may be some epsilon from + // the exact proportion defined here depending on the precision an + // implementation supports. Weight is not a percentage and the sum of + // weights does not need to equal 100. + // + // If only one backend is specified and it has a weight greater than 0, 100% + // of the traffic is forwarded to that backend. If weight is set to 0, no + // traffic should be forwarded for this entry. If unspecified, weight + // defaults to 1. + // + // Support for this field varies based on the context where used. + // + // +optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=1000000 + Weight *int32 `json:"weight,omitempty"` +} + +// RouteConditionType is a type of condition for a route. +type RouteConditionType string + +// RouteConditionReason is a reason for a route condition. +type RouteConditionReason string + +const ( + // This condition indicates whether the route has been accepted or rejected + // by a Gateway, and why. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "NotAllowedByListeners" + // * "NoMatchingListenerHostname" + // * "NoMatchingParent" + // * "UnsupportedValue" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + RouteConditionAccepted RouteConditionType = "Accepted" + + // This reason is used with the "Accepted" condition when the Route has been + // accepted by the Gateway. 
+ RouteReasonAccepted RouteConditionReason = "Accepted" + + // This reason is used with the "Accepted" condition when the route has not + // been accepted by a Gateway because the Gateway has no Listener whose + // allowedRoutes criteria permit the route + RouteReasonNotAllowedByListeners RouteConditionReason = "NotAllowedByListeners" + + // This reason is used with the "Accepted" condition when the Gateway has no + // compatible Listeners whose Hostname matches the route + RouteReasonNoMatchingListenerHostname RouteConditionReason = "NoMatchingListenerHostname" + + // This reason is used with the "Accepted" condition when there are + // no matching Parents. In the case of Gateways, this can occur when + // a Route ParentRef specifies a Port and/or SectionName that does not + // match any Listeners in the Gateway. + RouteReasonNoMatchingParent RouteConditionReason = "NoMatchingParent" + + // This reason is used with the "Accepted" condition when a value for an Enum + // is not recognized. + RouteReasonUnsupportedValue RouteConditionReason = "UnsupportedValue" + + // This reason is used with the "Accepted" when a controller has not yet + // reconciled the route. + RouteReasonPending RouteConditionReason = "Pending" + + // This reason is used with the "Accepted" condition when there + // are incompatible filters present on a route rule (for example if + // the URLRewrite and RequestRedirect are both present on an HTTPRoute). + RouteReasonIncompatibleFilters RouteConditionReason = "IncompatibleFilters" +) + +const ( + // This condition indicates whether the controller was able to resolve all + // the object references for the Route. 
+ // + // Possible reasons for this condition to be True are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "RefNotPermitted" + // * "InvalidKind" + // * "BackendNotFound" + // * "UnsupportedProtocol" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + RouteConditionResolvedRefs RouteConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. + RouteReasonResolvedRefs RouteConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when + // one of the Listener's Routes has a BackendRef to an object in + // another namespace, where the object in the other namespace does + // not have a ReferenceGrant explicitly allowing the reference. + RouteReasonRefNotPermitted RouteConditionReason = "RefNotPermitted" + + // This reason is used with the "ResolvedRefs" condition when + // one of the Route's rules has a reference to an unknown or unsupported + // Group and/or Kind. + RouteReasonInvalidKind RouteConditionReason = "InvalidKind" + + // This reason is used with the "ResolvedRefs" condition when one of the + // Route's rules has a reference to a resource that does not exist. + RouteReasonBackendNotFound RouteConditionReason = "BackendNotFound" + + // This reason is used with the "ResolvedRefs" condition when one of the + // Route's rules has a reference to a resource with an app protocol that + // is not supported by this implementation. + RouteReasonUnsupportedProtocol RouteConditionReason = "UnsupportedProtocol" +) + +const ( + // This condition indicates that the Route contains a combination of both + // valid and invalid rules. 
+ // + // When this happens, implementations MUST take one of the following + // approaches: + // + // 1) Drop Rule(s): With this approach, implementations will drop the + // invalid Route Rule(s) until they are fully valid again. The message + // for this condition MUST start with the prefix "Dropped Rule" and + // include information about which Rules have been dropped. In this + // state, the "Accepted" condition MUST be set to "True" with the latest + // generation of the resource. + // 2) Fall Back: With this approach, implementations will fall back to the + // last known good state of the entire Route. The message for this + // condition MUST start with the prefix "Fall Back" and include + // information about why the current Rule(s) are invalid. To represent + // this, the "Accepted" condition MUST be set to "True" with the + // generation of the last known good state of the resource. + // + // Reverting to the last known good state should only be done by + // implementations that have a means of restoring that state if/when they + // are restarted. + // + // This condition MUST NOT be set if a Route is fully valid, fully invalid, + // or not accepted. By extension, that means that this condition MUST only + // be set when it is "True". + // + // Possible reasons for this condition to be True are: + // + // * "UnsupportedValue" + // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. + RouteConditionPartiallyInvalid RouteConditionType = "PartiallyInvalid" +) + +// RouteParentStatus describes the status of a route with respect to an +// associated Parent. +type RouteParentStatus struct { + // ParentRef corresponds with a ParentRef in the spec that this + // RouteParentStatus struct describes the status of. 
+ // +required + ParentRef ParentReference `json:"parentRef"` + + // ControllerName is a domain/path string that indicates the name of the + // controller that wrote this status. This corresponds with the + // controllerName field on GatewayClass. + // + // Example: "example.net/gateway-controller". + // + // The format of this field is DOMAIN "/" PATH, where DOMAIN and PATH are + // valid Kubernetes names + // (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + // + // Controllers MUST populate this field when writing status. Controllers should ensure that + // entries to status populated with their ControllerName are cleaned up when they are no + // longer necessary. + // +required + ControllerName GatewayController `json:"controllerName"` + + // Conditions describes the status of the route with respect to the Gateway. + // Note that the route's availability is also subject to the Gateway's own + // status conditions and listener status. + // + // If the Route's ParentRef specifies an existing Gateway that supports + // Routes of this kind AND that Gateway's controller has sufficient access, + // then that Gateway's controller MUST set the "Accepted" condition on the + // Route, to indicate whether the route has been accepted or rejected by the + // Gateway, and why. + // + // A Route MUST be considered "Accepted" if at least one of the Route's + // rules is implemented by the Gateway. + // + // There are a number of cases where the "Accepted" condition may not be set + // due to lack of controller visibility, that includes when: + // + // * The Route refers to a nonexistent parent. + // * The Route is of a type that the controller does not support. + // * The Route is in a namespace the controller does not have access to. + // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. 
+ // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// RouteStatus defines the common attributes that all Routes MUST include within +// their status. +type RouteStatus struct { + // Parents is a list of parent resources (usually Gateways) that are + // associated with the route, and the status of the route with respect to + // each parent. 
When this route attaches to a parent, the controller that + // manages the parent must add an entry to this list when the controller + // first sees the route and should update the entry as appropriate when the + // route or gateway is modified. + // + // Note that parent references that cannot be resolved by an implementation + // of this API will not be added to this list. Implementations of this API + // can only populate Route status for the Gateways/parent resources they are + // responsible for. + // + // A maximum of 32 Gateways will be represented in this list. An empty list + // means the route has not been attached to any Gateway. + // + // + // Notes for implementors: + // + // While parents is not a listType `map`, this is due to the fact that the + // list key is not scalar, and Kubernetes is unable to represent this. + // + // Parent status MUST be considered to be namespaced by the combination of + // the parentRef and controllerName fields, and implementations should keep + // the following rules in mind when updating this status: + // + // * Implementations MUST update only entries that have a matching value of + // `controllerName` for that implementation. + // * Implementations MUST NOT update entries with non-matching `controllerName` + // fields. + // * Implementations MUST treat each `parentRef`` in the Route separately and + // update its status based on the relationship with that parent. + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // + // + // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + Parents []RouteParentStatus `json:"parents"` +} + +// Hostname is the fully qualified domain name of a network host. 
This matches +// the RFC 1123 definition of a hostname with 2 notable exceptions: +// +// 1. IPs are not allowed. +// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard +// label must appear by itself as the first label. +// +// Hostname can be "precise" which is a domain name without the terminating +// dot of a network host (e.g. "foo.example.com") or "wildcard", which is a +// domain name prefixed with a single wildcard label (e.g. `*.example.com`). +// +// Note that as per RFC1035 and RFC1123, a *label* must consist of lower case +// alphanumeric characters or '-', and must start and end with an alphanumeric +// character. No other punctuation is allowed. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +type Hostname string + +// PreciseHostname is the fully qualified domain name of a network host. This +// matches the RFC 1123 definition of a hostname with 1 notable exception that +// numeric IP addresses are not allowed. +// +// Note that as per RFC1035 and RFC1123, a *label* must consist of lower case +// alphanumeric characters or '-', and must start and end with an alphanumeric +// character. No other punctuation is allowed. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +type PreciseHostname string + +// AbsoluteURI represents a Uniform Resource Identifier (URI) as defined by RFC3986. + +// The AbsoluteURI MUST NOT be a relative URI, and it MUST follow the URI syntax and +// encoding rules specified in RFC3986. The AbsoluteURI MUST include both a +// scheme (e.g., "http" or "spiffe") and a scheme-specific-part. URIs that +// include an authority MUST include a fully qualified domain name or +// IP address as the host. 
+// The below regex is taken from the regex section in RFC 3986 with a slight modification to enforce a full URI and not relative. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^(([^:/?#]+):)(//([^/?#]*))([^?#]*)(\?([^#]*))?(#(.*))?` +type AbsoluteURI string + +// The CORSOrigin MUST NOT be a relative URI, and it MUST follow the URI syntax and +// encoding rules specified in RFC3986. The CORSOrigin MUST include both a +// scheme (e.g., "http" or "spiffe") and a scheme-specific-part, or it should be a single '*' character. +// URIs that include an authority MUST include a fully qualified domain name or +// IP address as the host. +// The below regex was generated to simplify the assertion of scheme://host: being port optional +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`(^\*$)|(^([a-zA-Z][a-zA-Z0-9+\-.]+):\/\/([^:/?#]+)(:([0-9]{1,5}))?$)` +type CORSOrigin string + +// Group refers to a Kubernetes Group. It must either be an empty string or a +// RFC 1123 subdomain. +// +// This validation is based off of the corresponding Kubernetes validation: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L208 +// +// Valid values include: +// +// * "" - empty string implies core Kubernetes API group +// * "gateway.networking.k8s.io" +// * "foo.example.com" +// +// Invalid values include: +// +// * "example.com/bar" - "/" is an invalid character +// +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +type Group string + +// Kind refers to a Kubernetes Kind. 
+// +// Valid values include: +// +// * "Service" +// * "HTTPRoute" +// +// Invalid values include: +// +// * "invalid/kind" - "/" is an invalid character +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=63 +// +kubebuilder:validation:Pattern=`^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$` +type Kind string + +// ObjectName refers to the name of a Kubernetes object. +// Object names can have a variety of forms, including RFC 1123 subdomains, +// RFC 1123 labels, or RFC 1035 labels. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +type ObjectName string + +// Namespace refers to a Kubernetes namespace. It must be a RFC 1123 label. +// +// This validation is based off of the corresponding Kubernetes validation: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L187 +// +// This is used for Namespace name validation here: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/api/validation/generic.go#L63 +// +// Valid values include: +// +// * "example" +// +// Invalid values include: +// +// * "example.com" - "." is an invalid character +// +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$` +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=63 +type Namespace string + +// SectionName is the name of a section in a Kubernetes resource. +// +// In the following resources, SectionName is interpreted as the following: +// +// * Gateway: Listener name +// * HTTPRoute: HTTPRouteRule name +// * Service: Port name +// +// Section names can have a variety of forms, including RFC 1123 subdomains, +// RFC 1123 labels, or RFC 1035 labels. 
+// +// This validation is based off of the corresponding Kubernetes validation: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L208 +// +// Valid values include: +// +// * "example" +// * "foo-example" +// * "example.com" +// * "foo.example.com" +// +// Invalid values include: +// +// * "example.com/bar" - "/" is an invalid character +// +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +type SectionName string + +// GatewayController is the name of a Gateway API controller. It must be a +// domain prefixed path. +// +// Valid values include: +// +// * "example.com/bar" +// +// Invalid values include: +// +// * "example.com" - must include path +// * "foo.example.com" - must include path +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$` +type GatewayController string + +// AnnotationKey is the key of an annotation in Gateway API. This is used for +// validation of maps such as TLS options. This matches the Kubernetes +// "qualified name" validation that is used for annotations and other common +// values. +// +// Valid values include: +// +// * example +// * example.com +// * example.com/path +// * example.com/path.html +// +// Invalid values include: +// +// * example~ - "~" is an invalid character +// * example.com. - cannot start or end with "." +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$` +type AnnotationKey string + +// AnnotationValue is the value of an annotation in Gateway API. 
This is used +// for validation of maps such as TLS options. This roughly matches Kubernetes +// annotation validation, although the length validation in that case is based +// on the entire size of the annotations struct. +// +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=4096 +type AnnotationValue string + +// LabelKey is the key of a label in the Gateway API. This is used for validation +// of maps such as Gateway infrastructure labels. This matches the Kubernetes +// "qualified name" validation that is used for labels. +// +// Valid values include: +// +// * example +// * example.com +// * example.com/path +// * example.com/path.html +// +// Invalid values include: +// +// * example~ - "~" is an invalid character +// * example.com. - cannot start or end with "." +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$` +type LabelKey string + +// LabelValue is the value of a label in the Gateway API. This is used for validation +// of maps such as Gateway infrastructure labels. This matches the Kubernetes +// label validation rules: +// * must be 63 characters or less (can be empty), +// * unless empty, must begin and end with an alphanumeric character ([a-z0-9A-Z]), +// * could contain dashes (-), underscores (_), dots (.), and alphanumerics between. +// +// Valid values include: +// +// * MyValue +// * my.name +// * 123-my-value +// +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=63 +// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$` +type LabelValue string + +// AddressType defines how a network address is represented as a text string. 
+// This may take two possible forms: +// +// * A predefined CamelCase string identifier (currently limited to `IPAddress` or `Hostname`) +// * A domain-prefixed string identifier (like `acme.io/CustomAddressType`) +// +// Values `IPAddress` and `Hostname` have Extended support. +// +// The `NamedAddress` value has been deprecated in favor of implementation +// specific domain-prefixed strings. +// +// All other values, including domain-prefixed values have Implementation-specific support, +// which are used in implementation-specific behaviors. Support for additional +// predefined CamelCase identifiers may be added in future releases. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^Hostname|IPAddress|NamedAddress|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$` +type AddressType string + +// HeaderName is the name of a header or query parameter. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=256 +// +kubebuilder:validation:Pattern=`^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$` +// +k8s:deepcopy-gen=false +type HeaderName string + +// Duration is a string value representing a duration in time. The format is as specified +// in GEP-2257, a strict subset of the syntax parsed by Golang time.ParseDuration. +// +// +kubebuilder:validation:Pattern=`^([0-9]{1,5}(h|m|s|ms)){1,4}$` +type Duration string + +const ( + // A textual representation of a numeric IP address. IPv4 + // addresses must be in dotted-decimal form. IPv6 addresses + // must be in a standard IPv6 text representation + // (see [RFC 5952](https://tools.ietf.org/html/rfc5952)). + // + // This type is intended for specific addresses. Address ranges are not + // supported (e.g. you cannot use a CIDR range like 127.0.0.0/24 as an + // IPAddress). 
+ // + // Support: Extended + IPAddressType AddressType = "IPAddress" + + // A Hostname represents a DNS based ingress point. This is similar to the + // corresponding hostname field in Kubernetes load balancer status. For + // example, this concept may be used for cloud load balancers where a DNS + // name is used to expose a load balancer. + // + // Support: Extended + HostnameAddressType AddressType = "Hostname" + + // A NamedAddress provides a way to reference a specific IP address by name. + // For example, this may be a name or other unique identifier that refers + // to a resource on a cloud provider such as a static IP. + // + // The `NamedAddress` type has been deprecated in favor of implementation + // specific domain-prefixed strings. + // + // Support: Implementation-specific + NamedAddressType AddressType = "NamedAddress" +) + +// SessionPersistence defines the desired state of SessionPersistence. +// +kubebuilder:validation:XValidation:message="AbsoluteTimeout must be specified when cookie lifetimeType is Permanent",rule="!has(self.cookieConfig) || !has(self.cookieConfig.lifetimeType) || self.cookieConfig.lifetimeType != 'Permanent' || has(self.absoluteTimeout)" +type SessionPersistence struct { + // SessionName defines the name of the persistent session token + // which may be reflected in the cookie or the header. Users + // should avoid reusing session names to prevent unintended + // consequences, such as rejection or unpredictable behavior. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxLength=128 + SessionName *string `json:"sessionName,omitempty"` + + // AbsoluteTimeout defines the absolute timeout of the persistent + // session. Once the AbsoluteTimeout duration has elapsed, the + // session becomes invalid. + // + // Support: Extended + // + // +optional + AbsoluteTimeout *Duration `json:"absoluteTimeout,omitempty"` + + // IdleTimeout defines the idle timeout of the persistent session. 
+ // Once the session has been idle for more than the specified + // IdleTimeout duration, the session becomes invalid. + // + // Support: Extended + // + // +optional + IdleTimeout *Duration `json:"idleTimeout,omitempty"` + + // Type defines the type of session persistence such as through + // the use a header or cookie. Defaults to cookie based session + // persistence. + // + // Support: Core for "Cookie" type + // + // Support: Extended for "Header" type + // + // +optional + // +kubebuilder:default=Cookie + Type *SessionPersistenceType `json:"type,omitempty"` + + // CookieConfig provides configuration settings that are specific + // to cookie-based session persistence. + // + // Support: Core + // + // +optional + CookieConfig *CookieConfig `json:"cookieConfig,omitempty"` +} + +// +kubebuilder:validation:Enum=Cookie;Header +type SessionPersistenceType string + +const ( + // CookieBasedSessionPersistence specifies cookie-based session + // persistence. + // + // Support: Core + CookieBasedSessionPersistence SessionPersistenceType = "Cookie" + + // HeaderBasedSessionPersistence specifies header-based session + // persistence. + // + // Support: Extended + HeaderBasedSessionPersistence SessionPersistenceType = "Header" +) + +// CookieConfig defines the configuration for cookie-based session persistence. +type CookieConfig struct { + // LifetimeType specifies whether the cookie has a permanent or + // session-based lifetime. A permanent cookie persists until its + // specified expiry time, defined by the Expires or Max-Age cookie + // attributes, while a session cookie is deleted when the current + // session ends. + // + // When set to "Permanent", AbsoluteTimeout indicates the + // cookie's lifetime via the Expires or Max-Age cookie attributes + // and is required. + // + // When set to "Session", AbsoluteTimeout indicates the + // absolute lifetime of the cookie tracked by the gateway and + // is optional. + // + // Defaults to "Session". 
+ // + // Support: Core for "Session" type + // + // Support: Extended for "Permanent" type + // + // +optional + // +kubebuilder:default=Session + LifetimeType *CookieLifetimeType `json:"lifetimeType,omitempty"` +} + +// +kubebuilder:validation:Enum=Permanent;Session +type CookieLifetimeType string + +const ( + // SessionCookieLifetimeType specifies the type for a session + // cookie. + // + // Support: Core + SessionCookieLifetimeType CookieLifetimeType = "Session" + + // PermanentCookieLifetimeType specifies the type for a permanent + // cookie. + // + // Support: Extended + PermanentCookieLifetimeType CookieLifetimeType = "Permanent" +) + +// +kubebuilder:validation:XValidation:message="numerator must be less than or equal to denominator",rule="self.numerator <= self.denominator" +type Fraction struct { + // +kubebuilder:validation:Minimum=0 + // +required + Numerator int32 `json:"numerator"` + + // +optional + // +kubebuilder:default=100 + // +kubebuilder:validation:Minimum=1 + Denominator *int32 `json:"denominator,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..174c2927 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go @@ -0,0 +1,2251 @@ +//go:build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. 
DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedListeners) DeepCopyInto(out *AllowedListeners) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(ListenerNamespaces) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedListeners. +func (in *AllowedListeners) DeepCopy() *AllowedListeners { + if in == nil { + return nil + } + out := new(AllowedListeners) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedRoutes) DeepCopyInto(out *AllowedRoutes) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(RouteNamespaces) + (*in).DeepCopyInto(*out) + } + if in.Kinds != nil { + in, out := &in.Kinds, &out.Kinds + *out = make([]RouteGroupKind, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRoutes. +func (in *AllowedRoutes) DeepCopy() *AllowedRoutes { + if in == nil { + return nil + } + out := new(AllowedRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendObjectReference) DeepCopyInto(out *BackendObjectReference) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(Kind) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(PortNumber) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendObjectReference. +func (in *BackendObjectReference) DeepCopy() *BackendObjectReference { + if in == nil { + return nil + } + out := new(BackendObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRef) DeepCopyInto(out *BackendRef) { + *out = *in + in.BackendObjectReference.DeepCopyInto(&out.BackendObjectReference) + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRef. +func (in *BackendRef) DeepCopy() *BackendRef { + if in == nil { + return nil + } + out := new(BackendRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicy) DeepCopyInto(out *BackendTLSPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicy. 
+func (in *BackendTLSPolicy) DeepCopy() *BackendTLSPolicy { + if in == nil { + return nil + } + out := new(BackendTLSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendTLSPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicyList) DeepCopyInto(out *BackendTLSPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackendTLSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicyList. +func (in *BackendTLSPolicyList) DeepCopy() *BackendTLSPolicyList { + if in == nil { + return nil + } + out := new(BackendTLSPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendTLSPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendTLSPolicySpec) DeepCopyInto(out *BackendTLSPolicySpec) { + *out = *in + if in.TargetRefs != nil { + in, out := &in.TargetRefs, &out.TargetRefs + *out = make([]LocalPolicyTargetReferenceWithSectionName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Validation.DeepCopyInto(&out.Validation) + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicySpec. +func (in *BackendTLSPolicySpec) DeepCopy() *BackendTLSPolicySpec { + if in == nil { + return nil + } + out := new(BackendTLSPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicyValidation) DeepCopyInto(out *BackendTLSPolicyValidation) { + *out = *in + if in.CACertificateRefs != nil { + in, out := &in.CACertificateRefs, &out.CACertificateRefs + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.WellKnownCACertificates != nil { + in, out := &in.WellKnownCACertificates, &out.WellKnownCACertificates + *out = new(WellKnownCACertificatesType) + **out = **in + } + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]SubjectAltName, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicyValidation. +func (in *BackendTLSPolicyValidation) DeepCopy() *BackendTLSPolicyValidation { + if in == nil { + return nil + } + out := new(BackendTLSPolicyValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonRouteSpec) DeepCopyInto(out *CommonRouteSpec) { + *out = *in + if in.ParentRefs != nil { + in, out := &in.ParentRefs, &out.ParentRefs + *out = make([]ParentReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonRouteSpec. +func (in *CommonRouteSpec) DeepCopy() *CommonRouteSpec { + if in == nil { + return nil + } + out := new(CommonRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookieConfig) DeepCopyInto(out *CookieConfig) { + *out = *in + if in.LifetimeType != nil { + in, out := &in.LifetimeType, &out.LifetimeType + *out = new(CookieLifetimeType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookieConfig. +func (in *CookieConfig) DeepCopy() *CookieConfig { + if in == nil { + return nil + } + out := new(CookieConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardBodyConfig) DeepCopyInto(out *ForwardBodyConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardBodyConfig. +func (in *ForwardBodyConfig) DeepCopy() *ForwardBodyConfig { + if in == nil { + return nil + } + out := new(ForwardBodyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fraction) DeepCopyInto(out *Fraction) { + *out = *in + if in.Denominator != nil { + in, out := &in.Denominator, &out.Denominator + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fraction. 
+func (in *Fraction) DeepCopy() *Fraction { + if in == nil { + return nil + } + out := new(Fraction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendTLSConfig) DeepCopyInto(out *FrontendTLSConfig) { + *out = *in + in.Default.DeepCopyInto(&out.Default) + if in.PerPort != nil { + in, out := &in.PerPort, &out.PerPort + *out = make([]TLSPortConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendTLSConfig. +func (in *FrontendTLSConfig) DeepCopy() *FrontendTLSConfig { + if in == nil { + return nil + } + out := new(FrontendTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendTLSValidation) DeepCopyInto(out *FrontendTLSValidation) { + *out = *in + if in.CACertificateRefs != nil { + in, out := &in.CACertificateRefs, &out.CACertificateRefs + *out = make([]ObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendTLSValidation. +func (in *FrontendTLSValidation) DeepCopy() *FrontendTLSValidation { + if in == nil { + return nil + } + out := new(FrontendTLSValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCAuthConfig) DeepCopyInto(out *GRPCAuthConfig) { + *out = *in + if in.AllowedRequestHeaders != nil { + in, out := &in.AllowedRequestHeaders, &out.AllowedRequestHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAuthConfig. +func (in *GRPCAuthConfig) DeepCopy() *GRPCAuthConfig { + if in == nil { + return nil + } + out := new(GRPCAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCBackendRef) DeepCopyInto(out *GRPCBackendRef) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]GRPCRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCBackendRef. +func (in *GRPCBackendRef) DeepCopy() *GRPCBackendRef { + if in == nil { + return nil + } + out := new(GRPCBackendRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCHeaderMatch) DeepCopyInto(out *GRPCHeaderMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(GRPCHeaderMatchType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCHeaderMatch. +func (in *GRPCHeaderMatch) DeepCopy() *GRPCHeaderMatch { + if in == nil { + return nil + } + out := new(GRPCHeaderMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCMethodMatch) DeepCopyInto(out *GRPCMethodMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(GRPCMethodMatchType) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCMethodMatch. +func (in *GRPCMethodMatch) DeepCopy() *GRPCMethodMatch { + if in == nil { + return nil + } + out := new(GRPCMethodMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRoute) DeepCopyInto(out *GRPCRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRoute. +func (in *GRPCRoute) DeepCopy() *GRPCRoute { + if in == nil { + return nil + } + out := new(GRPCRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GRPCRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCRouteFilter) DeepCopyInto(out *GRPCRouteFilter) { + *out = *in + if in.RequestHeaderModifier != nil { + in, out := &in.RequestHeaderModifier, &out.RequestHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.ResponseHeaderModifier != nil { + in, out := &in.ResponseHeaderModifier, &out.ResponseHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.RequestMirror != nil { + in, out := &in.RequestMirror, &out.RequestMirror + *out = new(HTTPRequestMirrorFilter) + (*in).DeepCopyInto(*out) + } + if in.ExtensionRef != nil { + in, out := &in.ExtensionRef, &out.ExtensionRef + *out = new(LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteFilter. +func (in *GRPCRouteFilter) DeepCopy() *GRPCRouteFilter { + if in == nil { + return nil + } + out := new(GRPCRouteFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteList) DeepCopyInto(out *GRPCRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GRPCRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteList. +func (in *GRPCRouteList) DeepCopy() *GRPCRouteList { + if in == nil { + return nil + } + out := new(GRPCRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GRPCRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GRPCRouteMatch) DeepCopyInto(out *GRPCRouteMatch) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(GRPCMethodMatch) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]GRPCHeaderMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteMatch. +func (in *GRPCRouteMatch) DeepCopy() *GRPCRouteMatch { + if in == nil { + return nil + } + out := new(GRPCRouteMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteRule) DeepCopyInto(out *GRPCRouteRule) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(SectionName) + **out = **in + } + if in.Matches != nil { + in, out := &in.Matches, &out.Matches + *out = make([]GRPCRouteMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]GRPCRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendRefs != nil { + in, out := &in.BackendRefs, &out.BackendRefs + *out = make([]GRPCBackendRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SessionPersistence != nil { + in, out := &in.SessionPersistence, &out.SessionPersistence + *out = new(SessionPersistence) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteRule. 
+func (in *GRPCRouteRule) DeepCopy() *GRPCRouteRule { + if in == nil { + return nil + } + out := new(GRPCRouteRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteSpec) DeepCopyInto(out *GRPCRouteSpec) { + *out = *in + in.CommonRouteSpec.DeepCopyInto(&out.CommonRouteSpec) + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]GRPCRouteRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteSpec. +func (in *GRPCRouteSpec) DeepCopy() *GRPCRouteSpec { + if in == nil { + return nil + } + out := new(GRPCRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteStatus) DeepCopyInto(out *GRPCRouteStatus) { + *out = *in + in.RouteStatus.DeepCopyInto(&out.RouteStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteStatus. +func (in *GRPCRouteStatus) DeepCopy() *GRPCRouteStatus { + if in == nil { + return nil + } + out := new(GRPCRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. 
+func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayBackendTLS) DeepCopyInto(out *GatewayBackendTLS) { + *out = *in + if in.ClientCertificateRef != nil { + in, out := &in.ClientCertificateRef, &out.ClientCertificateRef + *out = new(SecretObjectReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayBackendTLS. +func (in *GatewayBackendTLS) DeepCopy() *GatewayBackendTLS { + if in == nil { + return nil + } + out := new(GatewayBackendTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayClass) DeepCopyInto(out *GatewayClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClass. +func (in *GatewayClass) DeepCopy() *GatewayClass { + if in == nil { + return nil + } + out := new(GatewayClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayClassList) DeepCopyInto(out *GatewayClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GatewayClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassList. +func (in *GatewayClassList) DeepCopy() *GatewayClassList { + if in == nil { + return nil + } + out := new(GatewayClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayClassSpec) DeepCopyInto(out *GatewayClassSpec) { + *out = *in + if in.ParametersRef != nil { + in, out := &in.ParametersRef, &out.ParametersRef + *out = new(ParametersReference) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassSpec. +func (in *GatewayClassSpec) DeepCopy() *GatewayClassSpec { + if in == nil { + return nil + } + out := new(GatewayClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayClassStatus) DeepCopyInto(out *GatewayClassStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SupportedFeatures != nil { + in, out := &in.SupportedFeatures, &out.SupportedFeatures + *out = make([]SupportedFeature, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassStatus. +func (in *GatewayClassStatus) DeepCopy() *GatewayClassStatus { + if in == nil { + return nil + } + out := new(GatewayClassStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayInfrastructure) DeepCopyInto(out *GatewayInfrastructure) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[LabelKey]LabelValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ParametersRef != nil { + in, out := &in.ParametersRef, &out.ParametersRef + *out = new(LocalParametersReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInfrastructure. +func (in *GatewayInfrastructure) DeepCopy() *GatewayInfrastructure { + if in == nil { + return nil + } + out := new(GatewayInfrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + if in.Listeners != nil { + in, out := &in.Listeners, &out.Listeners + *out = make([]Listener, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]GatewaySpecAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Infrastructure != nil { + in, out := &in.Infrastructure, &out.Infrastructure + *out = new(GatewayInfrastructure) + (*in).DeepCopyInto(*out) + } + if in.AllowedListeners != nil { + in, out := &in.AllowedListeners, &out.AllowedListeners + *out = new(AllowedListeners) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(GatewayTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. 
+func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpecAddress) DeepCopyInto(out *GatewaySpecAddress) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(AddressType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpecAddress. +func (in *GatewaySpecAddress) DeepCopy() *GatewaySpecAddress { + if in == nil { + return nil + } + out := new(GatewaySpecAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]GatewayStatusAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Listeners != nil { + in, out := &in.Listeners, &out.Listeners + *out = make([]ListenerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatus. +func (in *GatewayStatus) DeepCopy() *GatewayStatus { + if in == nil { + return nil + } + out := new(GatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayStatusAddress) DeepCopyInto(out *GatewayStatusAddress) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(AddressType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatusAddress. +func (in *GatewayStatusAddress) DeepCopy() *GatewayStatusAddress { + if in == nil { + return nil + } + out := new(GatewayStatusAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayTLSConfig) DeepCopyInto(out *GatewayTLSConfig) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(GatewayBackendTLS) + (*in).DeepCopyInto(*out) + } + if in.Frontend != nil { + in, out := &in.Frontend, &out.Frontend + *out = new(FrontendTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayTLSConfig. +func (in *GatewayTLSConfig) DeepCopy() *GatewayTLSConfig { + if in == nil { + return nil + } + out := new(GatewayTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPAuthConfig) DeepCopyInto(out *HTTPAuthConfig) { + *out = *in + if in.AllowedRequestHeaders != nil { + in, out := &in.AllowedRequestHeaders, &out.AllowedRequestHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedResponseHeaders != nil { + in, out := &in.AllowedResponseHeaders, &out.AllowedResponseHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuthConfig. 
+func (in *HTTPAuthConfig) DeepCopy() *HTTPAuthConfig { + if in == nil { + return nil + } + out := new(HTTPAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBackendRef) DeepCopyInto(out *HTTPBackendRef) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]HTTPRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendRef. +func (in *HTTPBackendRef) DeepCopy() *HTTPBackendRef { + if in == nil { + return nil + } + out := new(HTTPBackendRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCORSFilter) DeepCopyInto(out *HTTPCORSFilter) { + *out = *in + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]CORSOrigin, len(*in)) + copy(*out, *in) + } + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]HTTPMethodWithWildcard, len(*in)) + copy(*out, *in) + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]HTTPHeaderName, len(*in)) + copy(*out, *in) + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]HTTPHeaderName, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCORSFilter. 
+func (in *HTTPCORSFilter) DeepCopy() *HTTPCORSFilter { + if in == nil { + return nil + } + out := new(HTTPCORSFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPExternalAuthFilter) DeepCopyInto(out *HTTPExternalAuthFilter) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.GRPCAuthConfig != nil { + in, out := &in.GRPCAuthConfig, &out.GRPCAuthConfig + *out = new(GRPCAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPAuthConfig != nil { + in, out := &in.HTTPAuthConfig, &out.HTTPAuthConfig + *out = new(HTTPAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.ForwardBody != nil { + in, out := &in.ForwardBody, &out.ForwardBody + *out = new(ForwardBodyConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPExternalAuthFilter. +func (in *HTTPExternalAuthFilter) DeepCopy() *HTTPExternalAuthFilter { + if in == nil { + return nil + } + out := new(HTTPExternalAuthFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPHeaderFilter) DeepCopyInto(out *HTTPHeaderFilter) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderFilter. +func (in *HTTPHeaderFilter) DeepCopy() *HTTPHeaderFilter { + if in == nil { + return nil + } + out := new(HTTPHeaderFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeaderMatch) DeepCopyInto(out *HTTPHeaderMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(HeaderMatchType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderMatch. +func (in *HTTPHeaderMatch) DeepCopy() *HTTPHeaderMatch { + if in == nil { + return nil + } + out := new(HTTPHeaderMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPPathMatch) DeepCopyInto(out *HTTPPathMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(PathMatchType) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPathMatch. 
+func (in *HTTPPathMatch) DeepCopy() *HTTPPathMatch { + if in == nil { + return nil + } + out := new(HTTPPathMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPPathModifier) DeepCopyInto(out *HTTPPathModifier) { + *out = *in + if in.ReplaceFullPath != nil { + in, out := &in.ReplaceFullPath, &out.ReplaceFullPath + *out = new(string) + **out = **in + } + if in.ReplacePrefixMatch != nil { + in, out := &in.ReplacePrefixMatch, &out.ReplacePrefixMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPathModifier. +func (in *HTTPPathModifier) DeepCopy() *HTTPPathModifier { + if in == nil { + return nil + } + out := new(HTTPPathModifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPQueryParamMatch) DeepCopyInto(out *HTTPQueryParamMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(QueryParamMatchType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPQueryParamMatch. +func (in *HTTPQueryParamMatch) DeepCopy() *HTTPQueryParamMatch { + if in == nil { + return nil + } + out := new(HTTPQueryParamMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRequestMirrorFilter) DeepCopyInto(out *HTTPRequestMirrorFilter) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Percent != nil { + in, out := &in.Percent, &out.Percent + *out = new(int32) + **out = **in + } + if in.Fraction != nil { + in, out := &in.Fraction, &out.Fraction + *out = new(Fraction) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMirrorFilter. +func (in *HTTPRequestMirrorFilter) DeepCopy() *HTTPRequestMirrorFilter { + if in == nil { + return nil + } + out := new(HTTPRequestMirrorFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRequestRedirectFilter) DeepCopyInto(out *HTTPRequestRedirectFilter) { + *out = *in + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(PreciseHostname) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(HTTPPathModifier) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(PortNumber) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestRedirectFilter. +func (in *HTTPRequestRedirectFilter) DeepCopy() *HTTPRequestRedirectFilter { + if in == nil { + return nil + } + out := new(HTTPRequestRedirectFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute. +func (in *HTTPRoute) DeepCopy() *HTTPRoute { + if in == nil { + return nil + } + out := new(HTTPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTTPRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteFilter) DeepCopyInto(out *HTTPRouteFilter) { + *out = *in + if in.RequestHeaderModifier != nil { + in, out := &in.RequestHeaderModifier, &out.RequestHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.ResponseHeaderModifier != nil { + in, out := &in.ResponseHeaderModifier, &out.ResponseHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.RequestMirror != nil { + in, out := &in.RequestMirror, &out.RequestMirror + *out = new(HTTPRequestMirrorFilter) + (*in).DeepCopyInto(*out) + } + if in.RequestRedirect != nil { + in, out := &in.RequestRedirect, &out.RequestRedirect + *out = new(HTTPRequestRedirectFilter) + (*in).DeepCopyInto(*out) + } + if in.URLRewrite != nil { + in, out := &in.URLRewrite, &out.URLRewrite + *out = new(HTTPURLRewriteFilter) + (*in).DeepCopyInto(*out) + } + if in.CORS != nil { + in, out := &in.CORS, &out.CORS + *out = new(HTTPCORSFilter) + (*in).DeepCopyInto(*out) + } + if in.ExternalAuth != nil { + in, out := &in.ExternalAuth, &out.ExternalAuth + *out = new(HTTPExternalAuthFilter) + (*in).DeepCopyInto(*out) + } + if in.ExtensionRef != nil { + in, out := 
&in.ExtensionRef, &out.ExtensionRef + *out = new(LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteFilter. +func (in *HTTPRouteFilter) DeepCopy() *HTTPRouteFilter { + if in == nil { + return nil + } + out := new(HTTPRouteFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteList) DeepCopyInto(out *HTTPRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HTTPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteList. +func (in *HTTPRouteList) DeepCopy() *HTTPRouteList { + if in == nil { + return nil + } + out := new(HTTPRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTTPRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteMatch) DeepCopyInto(out *HTTPRouteMatch) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(HTTPPathMatch) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HTTPHeaderMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]HTTPQueryParamMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(HTTPMethod) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatch. +func (in *HTTPRouteMatch) DeepCopy() *HTTPRouteMatch { + if in == nil { + return nil + } + out := new(HTTPRouteMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteRetry) DeepCopyInto(out *HTTPRouteRetry) { + *out = *in + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]HTTPRouteRetryStatusCode, len(*in)) + copy(*out, *in) + } + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(int) + **out = **in + } + if in.Backoff != nil { + in, out := &in.Backoff, &out.Backoff + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetry. +func (in *HTTPRouteRetry) DeepCopy() *HTTPRouteRetry { + if in == nil { + return nil + } + out := new(HTTPRouteRetry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(SectionName) + **out = **in + } + if in.Matches != nil { + in, out := &in.Matches, &out.Matches + *out = make([]HTTPRouteMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]HTTPRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendRefs != nil { + in, out := &in.BackendRefs, &out.BackendRefs + *out = make([]HTTPBackendRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeouts != nil { + in, out := &in.Timeouts, &out.Timeouts + *out = new(HTTPRouteTimeouts) + (*in).DeepCopyInto(*out) + } + if in.Retry != nil { + in, out := &in.Retry, &out.Retry + *out = new(HTTPRouteRetry) + (*in).DeepCopyInto(*out) + } + if in.SessionPersistence != nil { + in, out := &in.SessionPersistence, &out.SessionPersistence + *out = new(SessionPersistence) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRule. +func (in *HTTPRouteRule) DeepCopy() *HTTPRouteRule { + if in == nil { + return nil + } + out := new(HTTPRouteRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteSpec) DeepCopyInto(out *HTTPRouteSpec) { + *out = *in + in.CommonRouteSpec.DeepCopyInto(&out.CommonRouteSpec) + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]HTTPRouteRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteSpec. +func (in *HTTPRouteSpec) DeepCopy() *HTTPRouteSpec { + if in == nil { + return nil + } + out := new(HTTPRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteStatus) DeepCopyInto(out *HTTPRouteStatus) { + *out = *in + in.RouteStatus.DeepCopyInto(&out.RouteStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteStatus. +func (in *HTTPRouteStatus) DeepCopy() *HTTPRouteStatus { + if in == nil { + return nil + } + out := new(HTTPRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeouts) DeepCopyInto(out *HTTPRouteTimeouts) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(Duration) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeouts. 
+func (in *HTTPRouteTimeouts) DeepCopy() *HTTPRouteTimeouts { + if in == nil { + return nil + } + out := new(HTTPRouteTimeouts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPURLRewriteFilter) DeepCopyInto(out *HTTPURLRewriteFilter) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(PreciseHostname) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(HTTPPathModifier) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPURLRewriteFilter. +func (in *HTTPURLRewriteFilter) DeepCopy() *HTTPURLRewriteFilter { + if in == nil { + return nil + } + out := new(HTTPURLRewriteFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Listener) DeepCopyInto(out *Listener) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(Hostname) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ListenerTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.AllowedRoutes != nil { + in, out := &in.AllowedRoutes, &out.AllowedRoutes + *out = new(AllowedRoutes) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener. +func (in *Listener) DeepCopy() *Listener { + if in == nil { + return nil + } + out := new(Listener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerNamespaces) DeepCopyInto(out *ListenerNamespaces) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(FromNamespaces) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerNamespaces. +func (in *ListenerNamespaces) DeepCopy() *ListenerNamespaces { + if in == nil { + return nil + } + out := new(ListenerNamespaces) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerStatus) DeepCopyInto(out *ListenerStatus) { + *out = *in + if in.SupportedKinds != nil { + in, out := &in.SupportedKinds, &out.SupportedKinds + *out = make([]RouteGroupKind, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerStatus. +func (in *ListenerStatus) DeepCopy() *ListenerStatus { + if in == nil { + return nil + } + out := new(ListenerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSConfig) DeepCopyInto(out *ListenerTLSConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(TLSModeType) + **out = **in + } + if in.CertificateRefs != nil { + in, out := &in.CertificateRefs, &out.CertificateRefs + *out = make([]SecretObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSConfig. +func (in *ListenerTLSConfig) DeepCopy() *ListenerTLSConfig { + if in == nil { + return nil + } + out := new(ListenerTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalParametersReference) DeepCopyInto(out *LocalParametersReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalParametersReference. +func (in *LocalParametersReference) DeepCopy() *LocalParametersReference { + if in == nil { + return nil + } + out := new(LocalParametersReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalPolicyTargetReference) DeepCopyInto(out *LocalPolicyTargetReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalPolicyTargetReference. +func (in *LocalPolicyTargetReference) DeepCopy() *LocalPolicyTargetReference { + if in == nil { + return nil + } + out := new(LocalPolicyTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalPolicyTargetReferenceWithSectionName) DeepCopyInto(out *LocalPolicyTargetReferenceWithSectionName) { + *out = *in + out.LocalPolicyTargetReference = in.LocalPolicyTargetReference + if in.SectionName != nil { + in, out := &in.SectionName, &out.SectionName + *out = new(SectionName) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalPolicyTargetReferenceWithSectionName. +func (in *LocalPolicyTargetReferenceWithSectionName) DeepCopy() *LocalPolicyTargetReferenceWithSectionName { + if in == nil { + return nil + } + out := new(LocalPolicyTargetReferenceWithSectionName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespacedPolicyTargetReference) DeepCopyInto(out *NamespacedPolicyTargetReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedPolicyTargetReference. 
+func (in *NamespacedPolicyTargetReference) DeepCopy() *NamespacedPolicyTargetReference { + if in == nil { + return nil + } + out := new(NamespacedPolicyTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersReference) DeepCopyInto(out *ParametersReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersReference. +func (in *ParametersReference) DeepCopy() *ParametersReference { + if in == nil { + return nil + } + out := new(ParametersReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(Kind) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } + if in.SectionName != nil { + in, out := &in.SectionName, &out.SectionName + *out = new(SectionName) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(PortNumber) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAncestorStatus) DeepCopyInto(out *PolicyAncestorStatus) { + *out = *in + in.AncestorRef.DeepCopyInto(&out.AncestorRef) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAncestorStatus. +func (in *PolicyAncestorStatus) DeepCopy() *PolicyAncestorStatus { + if in == nil { + return nil + } + out := new(PolicyAncestorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + if in.Ancestors != nil { + in, out := &in.Ancestors, &out.Ancestors + *out = make([]PolicyAncestorStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteGroupKind) DeepCopyInto(out *RouteGroupKind) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteGroupKind. +func (in *RouteGroupKind) DeepCopy() *RouteGroupKind { + if in == nil { + return nil + } + out := new(RouteGroupKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteNamespaces) DeepCopyInto(out *RouteNamespaces) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(FromNamespaces) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteNamespaces. +func (in *RouteNamespaces) DeepCopy() *RouteNamespaces { + if in == nil { + return nil + } + out := new(RouteNamespaces) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteParentStatus) DeepCopyInto(out *RouteParentStatus) { + *out = *in + in.ParentRef.DeepCopyInto(&out.ParentRef) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParentStatus. +func (in *RouteParentStatus) DeepCopy() *RouteParentStatus { + if in == nil { + return nil + } + out := new(RouteParentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + if in.Parents != nil { + in, out := &in.Parents, &out.Parents + *out = make([]RouteParentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretObjectReference) DeepCopyInto(out *SecretObjectReference) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(Kind) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretObjectReference. 
+func (in *SecretObjectReference) DeepCopy() *SecretObjectReference { + if in == nil { + return nil + } + out := new(SecretObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionPersistence) DeepCopyInto(out *SessionPersistence) { + *out = *in + if in.SessionName != nil { + in, out := &in.SessionName, &out.SessionName + *out = new(string) + **out = **in + } + if in.AbsoluteTimeout != nil { + in, out := &in.AbsoluteTimeout, &out.AbsoluteTimeout + *out = new(Duration) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(Duration) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(SessionPersistenceType) + **out = **in + } + if in.CookieConfig != nil { + in, out := &in.CookieConfig, &out.CookieConfig + *out = new(CookieConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionPersistence. +func (in *SessionPersistence) DeepCopy() *SessionPersistence { + if in == nil { + return nil + } + out := new(SessionPersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAltName) DeepCopyInto(out *SubjectAltName) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAltName. +func (in *SubjectAltName) DeepCopy() *SubjectAltName { + if in == nil { + return nil + } + out := new(SubjectAltName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SupportedFeature) DeepCopyInto(out *SupportedFeature) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupportedFeature. +func (in *SupportedFeature) DeepCopy() *SupportedFeature { + if in == nil { + return nil + } + out := new(SupportedFeature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(FrontendTLSValidation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSPortConfig) DeepCopyInto(out *TLSPortConfig) { + *out = *in + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSPortConfig. +func (in *TLSPortConfig) DeepCopy() *TLSPortConfig { + if in == nil { + return nil + } + out := new(TLSPortConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go new file mode 100644 index 00000000..1f390588 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go @@ -0,0 +1,78 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by register-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. +const GroupName = "gateway.networking.k8s.io" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = metav1.GroupVersion{Group: GroupName, Version: "v1"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &BackendTLSPolicy{}, + &BackendTLSPolicyList{}, + &GRPCRoute{}, + &GRPCRouteList{}, + &Gateway{}, + &GatewayClass{}, + &GatewayClassList{}, + &GatewayList{}, + &HTTPRoute{}, + &HTTPRouteList{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +}