diff --git a/Makefile b/Makefile index e298476..130dc91 100644 --- a/Makefile +++ b/Makefile @@ -127,6 +127,12 @@ build-installer: manifests generate kustomize ## Generate a consolidated YAML wi cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default > dist/install.yaml +KIND_CLUSTER_NAME ?= kind + +.PHONY: kind-load +kind-load: ## Loads the docker image into a local kind cluster. + kind load docker-image ${IMG} --name "$(KIND_CLUSTER_NAME)" + ##@ Deployment ifndef ignore-not-found @@ -134,20 +140,20 @@ ifndef ignore-not-found endif .PHONY: install -install: kubectl kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. +install: kubectl kustomize ## Install CRDs into the K8s cluster specified by $KUBECONFIG. $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - .PHONY: uninstall -uninstall: kubectl kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. +uninstall: kubectl kustomize ## Uninstall CRDs from the K8s cluster specified by $KUBECONFIG. Call with ignore-not-found=true to ignore resource not found errors during deletion. $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy -deploy: kubectl kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. +deploy: kubectl kustomize ## Deploy controller to the K8s cluster specified by $KUBECONFIG. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - .PHONY: undeploy -undeploy: kubectl kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. +undeploy: kubectl kustomize ## Undeploy controller from the K8s cluster specified by $KUBECONFIG. 
Call with ignore-not-found=true to ignore resource not found errors during deletion. $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - ##@ Dependencies diff --git a/cmd/main.go b/cmd/main.go index a322066..aa70ce6 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -39,6 +39,7 @@ import ( "github.com/kcp-dev/kcp-operator/internal/controller/cacheserver" "github.com/kcp-dev/kcp-operator/internal/controller/frontproxy" "github.com/kcp-dev/kcp-operator/internal/controller/kubeconfig" + kubeconfigrbac "github.com/kcp-dev/kcp-operator/internal/controller/kubeconfig-rbac" "github.com/kcp-dev/kcp-operator/internal/controller/rootshard" "github.com/kcp-dev/kcp-operator/internal/controller/shard" "github.com/kcp-dev/kcp-operator/internal/reconciling" @@ -188,6 +189,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Kubeconfig") os.Exit(1) } + if err = (&kubeconfigrbac.KubeconfigRBACReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "KubeconfigRBAC") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/operator.kcp.io_kubeconfigs.yaml b/config/crd/bases/operator.kcp.io_kubeconfigs.yaml index bacfd38..fb62c37 100644 --- a/config/crd/bases/operator.kcp.io_kubeconfigs.yaml +++ b/config/crd/bases/operator.kcp.io_kubeconfigs.yaml @@ -39,6 +39,25 @@ spec: spec: description: KubeconfigSpec defines the desired state of Kubeconfig. properties: + authorization: + description: Authorization allows to provision permissions for this + kubeconfig. 
+ properties: + clusterRoleBindings: + properties: + clusterRoles: + items: + type: string + type: array + workspacePath: + type: string + required: + - clusterRoles + - workspacePath + type: object + required: + - clusterRoleBindings + type: object certificateTemplate: description: |- CertificateTemplate allows to customize the properties on the generated diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index c54dbc1..88133df 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -5,3 +5,4 @@ resources: images: - name: controller newName: ghcr.io/kcp-dev/kcp-operator + newTag: e2e diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f301f10..3551ef0 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -50,19 +50,15 @@ spec: # - linux securityContext: runAsNonRoot: true - # TODO(user): For common cases that do not require escalating privileges - # it is recommended to ensure that all your Pods/Containers are restrictive. - # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted - # Please uncomment the following code if your project does NOT have to work on old Kubernetes - # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). - # seccompProfile: - # type: RuntimeDefault + seccompProfile: + type: RuntimeDefault containers: - command: - /manager args: - --leader-elect - --health-probe-bind-address=:8081 + - --zap-time-encoding=iso8601 image: controller:latest name: manager securityContext: diff --git a/docs/content/contributing/local-setup.md b/docs/content/contributing/local-setup.md index 457b8c5..9a6fab9 100644 --- a/docs/content/contributing/local-setup.md +++ b/docs/content/contributing/local-setup.md @@ -58,19 +58,20 @@ run the operator as a binary. 
Build the image: ```sh -make docker-build IMG=ghcr.io/kcp-dev/kcp-operator:1 +export IMG=ghcr.io/kcp-dev/kcp-operator:local +make docker-build ``` Load the image into the kind cluster: ```sh -kind load docker-image ghcr.io/kcp-dev/kcp-operator:1 +kind load docker-image "$IMG" ``` Deploy the operator manifests into the cluster: ```sh -make deploy IMG=ghcr.io/kcp-dev/kcp-operator:1 +make deploy ``` ### Option 2: Run Operator Directly @@ -87,12 +88,12 @@ Then start the operator via `go run`: go run ./cmd/main.go ``` -## Create kcp Instance +## Create kcp Instance Now you can create a root shard: ```sh -kubectl apply -f config/samples/operator.kcp.io_v1alpha1_rootshard.yaml +kubectl apply -f config/samples/operator.kcp.io_v1alpha1_rootshard.yaml ``` Create the additional shard: diff --git a/hack/run-e2e-tests.sh b/hack/run-e2e-tests.sh index dd4a6c0..0097c3d 100755 --- a/hack/run-e2e-tests.sh +++ b/hack/run-e2e-tests.sh @@ -16,7 +16,7 @@ set -euo pipefail -KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-e2e}" +export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-e2e}" DATA_DIR=".e2e-$KIND_CLUSTER_NAME" OPERATOR_PID=0 PROTOKOL_PID=0 @@ -35,12 +35,6 @@ kind create cluster --name "$KIND_CLUSTER_NAME" chmod 600 "$KUBECONFIG" teardown_kind() { - if [[ $OPERATOR_PID -gt 0 ]]; then - echo "Stopping kcp-operator…" - kill -TERM $OPERATOR_PID - wait $OPERATOR_PID - fi - if [[ $PROTOKOL_PID -gt 0 ]]; then echo "Stopping protokol…" kill -TERM $PROTOKOL_PID @@ -60,7 +54,7 @@ echo "Kubeconfig is in $KUBECONFIG." 
# deploying operator CRDs echo "Deploying operator CRDs…" -kubectl apply --kustomize config/crd +make --no-print-directory install # deploying cert-manager echo "Deploying cert-manager…" @@ -79,16 +73,10 @@ _tools/helm upgrade \ kubectl apply --filename hack/ci/testdata/clusterissuer.yaml -# start the operator locally -echo "Starting kcp-operator…" -_build/manager \ - -kubeconfig "$KUBECONFIG" \ - -zap-log-level debug \ - -zap-encoder console \ - -zap-time-encoding iso8601 \ - >"$DATA_DIR/kcp-operator.log" 2>&1 & -OPERATOR_PID=$! -echo "Running as process $OPERATOR_PID." +# build operator image and deploy it into kind +echo "Building and deploying kcp-operator…" +export IMG="ghcr.io/kcp-dev/kcp-operator:e2e" +make --no-print-directory docker-build kind-load deploy if command -v protokol &> /dev/null; then protokol --namespace 'e2e-*' --output "$DATA_DIR/kind-logs" 2>/dev/null & diff --git a/internal/client/clients.go b/internal/client/clients.go new file mode 100644 index 0000000..305966f --- /dev/null +++ b/internal/client/clients.go @@ -0,0 +1,140 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "fmt" + + "github.com/kcp-dev/logicalcluster/v3" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kcp-dev/kcp-operator/internal/resources" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +func NewRootShardClient(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetRootShardServiceName(rootShard), rootShard.Namespace) + + if !cluster.Empty() { + baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String()) + } + + return newClient(ctx, c, baseUrl, scheme, rootShard, nil, nil) +} + +func NewRootShardProxyClient(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetRootShardProxyServiceName(rootShard), rootShard.Namespace) + + if !cluster.Empty() { + baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String()) + } + + return newClient(ctx, c, baseUrl, scheme, rootShard, nil, nil) +} + +func NewShardClient(ctx context.Context, c ctrlruntimeclient.Client, shard *operatorv1alpha1.Shard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetShardServiceName(shard), shard.Namespace) + + if !cluster.Empty() { + baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String()) + } + + return newClient(ctx, c, baseUrl, scheme, nil, shard, nil) +} + +func newClient( + ctx context.Context, + c 
ctrlruntimeclient.Client, + url string, + scheme *runtime.Scheme, + // only one of these three should be provided, the others nil + rootShard *operatorv1alpha1.RootShard, + shard *operatorv1alpha1.Shard, + frontProxy *operatorv1alpha1.FrontProxy, +) (ctrlruntimeclient.Client, error) { + tlsConfig, err := getTLSConfig(ctx, c, rootShard, shard, frontProxy) + if err != nil { + return nil, fmt.Errorf("failed to determine TLS settings: %w", err) + } + + cfg := &rest.Config{ + Host: url, + TLSClientConfig: tlsConfig, + } + + return ctrlruntimeclient.New(cfg, ctrlruntimeclient.Options{Scheme: scheme}) +} + +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get + +func getTLSConfig(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, shard *operatorv1alpha1.Shard, frontProxy *operatorv1alpha1.FrontProxy) (rest.TLSClientConfig, error) { + rootShard, err := getRootShard(ctx, c, rootShard, shard, frontProxy) + if err != nil { + return rest.TLSClientConfig{}, fmt.Errorf("failed to determine effective RootShard: %w", err) + } + + // get the secret for the kcp-operator client cert + key := types.NamespacedName{ + Namespace: rootShard.Namespace, + Name: resources.GetRootShardCertificateName(rootShard, operatorv1alpha1.OperatorCertificate), + } + + certSecret := &corev1.Secret{} + if err := c.Get(ctx, key, certSecret); err != nil { + return rest.TLSClientConfig{}, fmt.Errorf("failed to get root shard proxy Secret: %w", err) + } + + return rest.TLSClientConfig{ + CAData: certSecret.Data["ca.crt"], + CertData: certSecret.Data["tls.crt"], + KeyData: certSecret.Data["tls.key"], + }, nil +} + +// +kubebuilder:rbac:groups=operator.kcp.io,resources=rootshards,verbs=get + +func getRootShard(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, shard *operatorv1alpha1.Shard, frontProxy *operatorv1alpha1.FrontProxy) (*operatorv1alpha1.RootShard, error) { + if rootShard != nil { + return rootShard, nil + } + + var 
ref *corev1.LocalObjectReference + var namespace string + switch { + case shard != nil: + ref = shard.Spec.RootShard.Reference + namespace = shard.Namespace + case frontProxy != nil: + ref = frontProxy.Spec.RootShard.Reference + namespace = frontProxy.Namespace + default: + panic("Must be called with either RootShard, Shard or FrontProxy.") + } + + rootShard = &operatorv1alpha1.RootShard{} + if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: ref.Name}, rootShard); err != nil { + return nil, fmt.Errorf("failed to get RootShard: %w", err) + } + + return rootShard, nil +} diff --git a/internal/client/frontproxy.go b/internal/client/frontproxy.go new file mode 100644 index 0000000..23534aa --- /dev/null +++ b/internal/client/frontproxy.go @@ -0,0 +1,71 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "errors" + "fmt" + + "github.com/kcp-dev/logicalcluster/v3" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +// +kubebuilder:rbac:groups=operator.kcp.io,resources=rootshards;shards;frontproxies,verbs=get + +func NewInternalKubeconfigClient(ctx context.Context, c ctrlruntimeclient.Client, kubeconfig *operatorv1alpha1.Kubeconfig, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + target := kubeconfig.Spec.Target + + switch { + case target.RootShardRef != nil: + rootShard := &operatorv1alpha1.RootShard{} + if err := c.Get(ctx, types.NamespacedName{Name: target.RootShardRef.Name, Namespace: kubeconfig.Namespace}, rootShard); err != nil { + return nil, fmt.Errorf("failed to get RootShard: %w", err) + } + + return NewRootShardClient(ctx, c, rootShard, cluster, scheme) + + case target.ShardRef != nil: + shard := &operatorv1alpha1.Shard{} + if err := c.Get(ctx, types.NamespacedName{Name: target.ShardRef.Name, Namespace: kubeconfig.Namespace}, shard); err != nil { + return nil, fmt.Errorf("failed to get Shard: %w", err) + } + + return NewShardClient(ctx, c, shard, cluster, scheme) + + case target.FrontProxyRef != nil: + frontProxy := &operatorv1alpha1.FrontProxy{} + if err := c.Get(ctx, types.NamespacedName{Name: target.FrontProxyRef.Name, Namespace: kubeconfig.Namespace}, frontProxy); err != nil { + return nil, fmt.Errorf("failed to get FrontProxy: %w", err) + } + + rootShard := &operatorv1alpha1.RootShard{} + if err := c.Get(ctx, types.NamespacedName{Name: frontProxy.Spec.RootShard.Reference.Name, Namespace: kubeconfig.Namespace}, rootShard); err != nil { + return nil, fmt.Errorf("failed to get RootShard: %w", err) + } + + return NewRootShardProxyClient(ctx, c, rootShard, cluster, scheme) + + 
default: + return nil, errors.New("no valid target configured in Kubeconfig: neither rootShard, shard nor frontProxy ref set") + } +} diff --git a/internal/controller/cacheserver/controller.go b/internal/controller/cacheserver/controller.go index 8c95b63..23936eb 100644 --- a/internal/controller/cacheserver/controller.go +++ b/internal/controller/cacheserver/controller.go @@ -57,6 +57,7 @@ func (r *CacheServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) // SetupWithManager sets up the controller with the Manager. func (r *CacheServerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). + Named("cacheserver"). For(&operatorv1alpha1.CacheServer{}). Complete(r) } diff --git a/internal/controller/frontproxy/controller.go b/internal/controller/frontproxy/controller.go index 84f663c..d833e6d 100644 --- a/internal/controller/frontproxy/controller.go +++ b/internal/controller/frontproxy/controller.go @@ -71,6 +71,7 @@ func (r *FrontProxyReconciler) SetupWithManager(mgr ctrl.Manager) error { }) return ctrl.NewControllerManagedBy(mgr). + Named("frontproxy"). For(&operatorv1alpha1.FrontProxy{}). Owns(&appsv1.Deployment{}). Owns(&corev1.ConfigMap{}). diff --git a/internal/controller/kubeconfig-rbac/controller.go b/internal/controller/kubeconfig-rbac/controller.go new file mode 100644 index 0000000..ecd22eb --- /dev/null +++ b/internal/controller/kubeconfig-rbac/controller.go @@ -0,0 +1,235 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfigrbac + +import ( + "context" + "fmt" + "slices" + + "github.com/kcp-dev/logicalcluster/v3" + "k8c.io/reconciler/pkg/reconciling" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + ctrl "sigs.k8s.io/controller-runtime" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/kcp-dev/kcp-operator/internal/client" + "github.com/kcp-dev/kcp-operator/internal/resources/kubeconfig" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +const cleanupFinalizer = "operator.kcp.io/cleanup-rbac" + +// KubeconfigRBACReconciler reconciles a Kubeconfig object +type KubeconfigRBACReconciler struct { + ctrlruntimeclient.Client + Scheme *runtime.Scheme +} + +// SetupWithManager sets up the controller with the Manager. +func (r *KubeconfigRBACReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&operatorv1alpha1.Kubeconfig{}). + Named("kubeconfig-rbac"). + Complete(r) +} + +// +kubebuilder:rbac:groups=operator.kcp.io,resources=kubeconfigs,verbs=get;update;patch +// +kubebuilder:rbac:groups=operator.kcp.io,resources=kubeconfigs/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+func (r *KubeconfigRBACReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger.V(4).Info("Reconciling") + + config := &operatorv1alpha1.Kubeconfig{} + if err := r.Get(ctx, req.NamespacedName, config); err != nil { + return ctrl.Result{}, ctrlruntimeclient.IgnoreNotFound(err) + } + + err := r.reconcile(ctx, config) + + return ctrl.Result{}, err +} + +func (r *KubeconfigRBACReconciler) reconcile(ctx context.Context, config *operatorv1alpha1.Kubeconfig) error { + if config.DeletionTimestamp != nil { + return r.handleDeletion(ctx, config) + } + + // NB: Reconciling a Kubeconfig assumes that the authz settings are immutable, i.e. it is not + // possible to first configure RBAC for workspace A and then update the Kubeconfig to mean workspace B. + + // No auth configured right now and since there is no finalizer, we also have nothing to + // potentially clean up, hence we're done here. + if config.Spec.Authorization == nil && !slices.Contains(config.Finalizers, cleanupFinalizer) { + return nil + } + + // If there is any kind of authorization configured, first we ensure our own finalizer. + if config.Spec.Authorization != nil { + updated, err := r.ensureFinalizer(ctx, config) + if err != nil { + return fmt.Errorf("failed to ensure cleanup finalizer: %w", err) + } + + if updated { + return nil // will requeue because we changed the object + } + } + // Make sure whatever is in the workspace matches what is configured in the Kubeconfig + if config.Spec.Authorization != nil { + if err := r.reconcileBindings(ctx, config); err != nil { + return fmt.Errorf("failed to ensure ClusterRoleBindings: %w", err) + } + } + // If nothing is configured, now is the perfect time to remove our finalizer again + // so that for future reconciliations, we quickly know that we can ignore this Kubeconfig. 
+ if config.Spec.Authorization == nil { + if err := r.removeFinalizer(ctx, config); err != nil { + return fmt.Errorf("failed to remove cleanup finalizer: %w", err) + } + } + + return nil +} + +func (r *KubeconfigRBACReconciler) reconcileBindings(ctx context.Context, kc *operatorv1alpha1.Kubeconfig) error { + targetClient, err := client.NewInternalKubeconfigClient(ctx, r.Client, kc, logicalcluster.Name(kc.Spec.Authorization.ClusterRoleBindings.WorkspacePath), nil) + if err != nil { + return fmt.Errorf("failed to create client to kubeconfig target: %w", err) + } + + // find all existing bindings + ownerLabels := kubeconfig.OwnerLabels(kc) + crbList := &rbacv1.ClusterRoleBindingList{} + if err := targetClient.List(ctx, crbList, ctrlruntimeclient.MatchingLabels(ownerLabels)); err != nil { + return fmt.Errorf("failed to list existing ClusterRoleBindings: %w", err) + } + + // delete those not configured in the kubeconfig anymore + var desiredBindings sets.Set[string] + if a := kc.Spec.Authorization; a != nil { + desiredBindings = sets.New(a.ClusterRoleBindings.ClusterRoles...) 
+ } + + logger := log.FromContext(ctx) + + for _, crb := range crbList.Items { + roleName := crb.RoleRef.Name + + if !desiredBindings.Has(roleName) { + logger.V(2).WithValues("name", crb.Name, "clusterrole", roleName).Info("Deleting overhanging ClusterRoleBinding") + + if err := targetClient.Delete(ctx, &crb); err != nil { + return fmt.Errorf("failed to delete overhanging ClusterRoleBinding %s: %w", crb.Name, err) + } + } + } + + // create reconcilers for each intended binding + subject := rbacv1.Subject{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Group", + Name: kubeconfig.KubeconfigGroup(kc), + } + + reconcilers := make([]reconciling.NamedClusterRoleBindingReconcilerFactory, 0, desiredBindings.Len()) + for _, roleName := range sets.List(desiredBindings) { + reconcilers = append(reconcilers, kubeconfig.ClusterRoleBindingReconciler(kc, roleName, subject)) + } + + if err := reconciling.ReconcileClusterRoleBindings(ctx, reconcilers, "", targetClient); err != nil { + return fmt.Errorf("failed to ensure ClusterRoleBindings: %w", err) + } + + return nil +} + +func (r *KubeconfigRBACReconciler) handleDeletion(ctx context.Context, kc *operatorv1alpha1.Kubeconfig) error { + // Did we already perform our cleanup or did this kubeconfig never have any bindings? 
+ if !slices.Contains(kc.Finalizers, cleanupFinalizer) || kc.Spec.Authorization == nil { + return r.removeFinalizer(ctx, kc) + } + + targetClient, err := client.NewInternalKubeconfigClient(ctx, r.Client, kc, logicalcluster.Name(kc.Spec.Authorization.ClusterRoleBindings.WorkspacePath), nil) + if err != nil { + return fmt.Errorf("failed to create client to kubeconfig target: %w", err) + } + + // find all existing bindings + ownerLabels := kubeconfig.OwnerLabels(kc) + crbList := &rbacv1.ClusterRoleBindingList{} + if err := targetClient.List(ctx, crbList, ctrlruntimeclient.MatchingLabels(ownerLabels)); err != nil { + return fmt.Errorf("failed to list existing ClusterRoleBindings: %w", err) + } + + // delete all of them + logger := log.FromContext(ctx) + + for _, crb := range crbList.Items { + logger.V(2).WithValues("name", crb.Name).Info("Deleting ClusterRoleBinding") + + if err := targetClient.Delete(ctx, &crb); err != nil { + return fmt.Errorf("failed to delete ClusterRoleBinding %s: %w", crb.Name, err) + } + } + + // when all are gone, remove the finalizer + if err := r.removeFinalizer(ctx, kc); err != nil { + return fmt.Errorf("failed to remove cleanup finalizer: %w", err) + } + + return nil +} + +func (r *KubeconfigRBACReconciler) ensureFinalizer(ctx context.Context, config *operatorv1alpha1.Kubeconfig) (updated bool, err error) { + finalizers := sets.New(config.GetFinalizers()...) + if finalizers.Has(cleanupFinalizer) { + return false, nil + } + + original := config.DeepCopy() + + finalizers.Insert(cleanupFinalizer) + config.SetFinalizers(sets.List(finalizers)) + + if err := r.Patch(ctx, config, ctrlruntimeclient.MergeFrom(original)); err != nil { + return false, err + } + + return true, nil +} + +func (r *KubeconfigRBACReconciler) removeFinalizer(ctx context.Context, config *operatorv1alpha1.Kubeconfig) error { + finalizers := sets.New(config.GetFinalizers()...) 
+ if !finalizers.Has(cleanupFinalizer) { + return nil + } + + original := config.DeepCopy() + + finalizers.Delete(cleanupFinalizer) + config.SetFinalizers(sets.List(finalizers)) + + return r.Patch(ctx, config, ctrlruntimeclient.MergeFrom(original)) +} diff --git a/internal/controller/kubeconfig-rbac/controller_test.go b/internal/controller/kubeconfig-rbac/controller_test.go new file mode 100644 index 0000000..07127ae --- /dev/null +++ b/internal/controller/kubeconfig-rbac/controller_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfigrbac + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kcp-dev/kcp-operator/internal/controller/util" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +func TestReconciling(t *testing.T) { + const namespace = "kubeconfig-tests" + + testcases := []struct { + name string + rootShard *operatorv1alpha1.RootShard + kubeConfig *operatorv1alpha1.Kubeconfig + }{ + { + name: "vanilla", + rootShard: &operatorv1alpha1.RootShard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rooty", + Namespace: namespace, + }, + Spec: operatorv1alpha1.RootShardSpec{ + External: operatorv1alpha1.ExternalConfig{ + Hostname: "example.kcp.io", + Port: 6443, + }, + CommonShardSpec: operatorv1alpha1.CommonShardSpec{ + Etcd: operatorv1alpha1.EtcdConfig{ + Endpoints: []string{"https://localhost:2379"}, + }, + }, + }, + }, + kubeConfig: &operatorv1alpha1.Kubeconfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "confy", + Namespace: namespace, + }, + Spec: operatorv1alpha1.KubeconfigSpec{ + Validity: metav1.Duration{Duration: 24 * time.Hour}, + SecretRef: corev1.LocalObjectReference{ + Name: "confy-secret", + }, + Target: operatorv1alpha1.KubeconfigTarget{ + RootShardRef: &corev1.LocalObjectReference{ + Name: "rooty", + }, + }, + }, + }, + }, + } + + scheme := util.GetTestScheme() + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + client := ctrlruntimefakeclient. + NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(testcase.rootShard). + WithObjects(testcase.rootShard, testcase.kubeConfig). 
+ Build() + + ctx := context.Background() + + controllerReconciler := &KubeconfigRBACReconciler{ + Client: client, + Scheme: client.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: ctrlruntimeclient.ObjectKeyFromObject(testcase.kubeConfig), + }) + require.NoError(t, err) + }) + } +} diff --git a/internal/controller/kubeconfig/controller.go b/internal/controller/kubeconfig/controller.go index ca20a94..53b54b4 100644 --- a/internal/controller/kubeconfig/controller.go +++ b/internal/controller/kubeconfig/controller.go @@ -49,6 +49,7 @@ type KubeconfigReconciler struct { // SetupWithManager sets up the controller with the Manager. func (r *KubeconfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). + Named("kubeconfig"). For(&operatorv1alpha1.Kubeconfig{}). Owns(&corev1.Secret{}). Owns(&certmanagerv1.Certificate{}). diff --git a/internal/controller/rootshard/controller.go b/internal/controller/rootshard/controller.go index ef3f7fa..17315cf 100644 --- a/internal/controller/rootshard/controller.go +++ b/internal/controller/rootshard/controller.go @@ -71,6 +71,7 @@ func (r *RootShardReconciler) SetupWithManager(mgr ctrl.Manager) error { }) return ctrl.NewControllerManagedBy(mgr). + Named("rootshard"). For(&operatorv1alpha1.RootShard{}). Owns(&appsv1.Deployment{}). Owns(&corev1.ConfigMap{}). diff --git a/internal/controller/shard/controller.go b/internal/controller/shard/controller.go index fdffb14..b1d173d 100644 --- a/internal/controller/shard/controller.go +++ b/internal/controller/shard/controller.go @@ -73,6 +73,7 @@ func (r *ShardReconciler) SetupWithManager(mgr ctrl.Manager) error { }) return ctrl.NewControllerManagedBy(mgr). + Named("shard"). For(&operatorv1alpha1.Shard{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Secret{}). 
diff --git a/internal/kubernetes/metadata.go b/internal/kubernetes/metadata.go index 50154ae..e109bf1 100644 --- a/internal/kubernetes/metadata.go +++ b/internal/kubernetes/metadata.go @@ -24,24 +24,20 @@ import ( func EnsureLabels(o metav1.Object, toEnsure map[string]string) { labels := maps.Clone(o.GetLabels()) - if labels == nil { labels = make(map[string]string) } - for key, value := range toEnsure { - labels[key] = value - } + + maps.Copy(labels, toEnsure) o.SetLabels(labels) } func EnsureAnnotations(o metav1.Object, toEnsure map[string]string) { annotations := maps.Clone(o.GetAnnotations()) - if annotations == nil { annotations = make(map[string]string) } - for key, value := range toEnsure { - annotations[key] = value - } + + maps.Copy(annotations, toEnsure) o.SetAnnotations(annotations) } diff --git a/internal/resources/frontproxy/reconciler.go b/internal/resources/frontproxy/reconciler.go index 3fc4da4..3fae8c9 100644 --- a/internal/resources/frontproxy/reconciler.go +++ b/internal/resources/frontproxy/reconciler.go @@ -55,6 +55,10 @@ func NewRootShardProxy(rootShard *operatorv1alpha1.RootShard) *reconciler { } } +// +kubebuilder:rbac:groups=core,resources=configmaps;secrets;services,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;update;patch +// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;update;patch + func (r *reconciler) Reconcile(ctx context.Context, client ctrlruntimeclient.Client, namespace string) error { var errs []error diff --git a/internal/resources/kubeconfig/rbac.go b/internal/resources/kubeconfig/rbac.go new file mode 100644 index 0000000..ff39bda --- /dev/null +++ b/internal/resources/kubeconfig/rbac.go @@ -0,0 +1,58 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "fmt" + + "k8c.io/reconciler/pkg/reconciling" + + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/kcp-dev/kcp-operator/internal/kubernetes" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +func OwnerLabels(owner *operatorv1alpha1.Kubeconfig) map[string]string { + return map[string]string{ + "operator.kcp.io/kubeconfig": string(owner.UID), + } +} + +func KubeconfigGroup(kc *operatorv1alpha1.Kubeconfig) string { + return fmt.Sprintf("kubeconfig:%s", kc.Name) +} + +func ClusterRoleBindingReconciler(owner *operatorv1alpha1.Kubeconfig, clusterRole string, subject rbacv1.Subject) reconciling.NamedClusterRoleBindingReconcilerFactory { + name := fmt.Sprintf("%s:%s", owner.UID, clusterRole) + + return func() (string, reconciling.ClusterRoleBindingReconciler) { + return name, func(crb *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, error) { + kubernetes.EnsureLabels(crb, OwnerLabels(owner)) + + crb.RoleRef = rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: clusterRole, + } + + crb.Subjects = []rbacv1.Subject{subject} + + return crb, nil + } + } +} diff --git a/sdk/apis/operator/v1alpha1/kubeconfig_types.go b/sdk/apis/operator/v1alpha1/kubeconfig_types.go index 35bc954..f023511 100644 --- a/sdk/apis/operator/v1alpha1/kubeconfig_types.go +++ b/sdk/apis/operator/v1alpha1/kubeconfig_types.go @@ -42,6 +42,9 @@ type KubeconfigSpec struct { // CertificateTemplate allows to customize the properties on the generated // certificate for this kubeconfig. 
CertificateTemplate *CertificateTemplate `json:"certificateTemplate,omitempty"` + + // Authorization allows to provision permissions for this kubeconfig. + Authorization *KubeconfigAuthorization `json:"authorization,omitempty"` } type KubeconfigTarget struct { @@ -50,6 +53,15 @@ type KubeconfigTarget struct { FrontProxyRef *corev1.LocalObjectReference `json:"frontProxyRef,omitempty"` } +type KubeconfigAuthorization struct { + ClusterRoleBindings KubeconfigClusterRoleBindings `json:"clusterRoleBindings"` +} + +type KubeconfigClusterRoleBindings struct { + WorkspacePath string `json:"workspacePath"` + ClusterRoles []string `json:"clusterRoles"` +} + // KubeconfigStatus defines the observed state of Kubeconfig type KubeconfigStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster diff --git a/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go index de5e57e..666bb05 100644 --- a/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go +++ b/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go @@ -843,6 +843,42 @@ func (in *Kubeconfig) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeconfigAuthorization) DeepCopyInto(out *KubeconfigAuthorization) { + *out = *in + in.ClusterRoleBindings.DeepCopyInto(&out.ClusterRoleBindings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigAuthorization. +func (in *KubeconfigAuthorization) DeepCopy() *KubeconfigAuthorization { + if in == nil { + return nil + } + out := new(KubeconfigAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeconfigClusterRoleBindings) DeepCopyInto(out *KubeconfigClusterRoleBindings) { + *out = *in + if in.ClusterRoles != nil { + in, out := &in.ClusterRoles, &out.ClusterRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigClusterRoleBindings. +func (in *KubeconfigClusterRoleBindings) DeepCopy() *KubeconfigClusterRoleBindings { + if in == nil { + return nil + } + out := new(KubeconfigClusterRoleBindings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigList) DeepCopyInto(out *KubeconfigList) { *out = *in @@ -891,6 +927,11 @@ func (in *KubeconfigSpec) DeepCopyInto(out *KubeconfigSpec) { *out = new(CertificateTemplate) (*in).DeepCopyInto(*out) } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(KubeconfigAuthorization) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigSpec. diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorization.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorization.go new file mode 100644 index 0000000..e8a0f7f --- /dev/null +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorization.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// KubeconfigAuthorizationApplyConfiguration represents a declarative configuration of the KubeconfigAuthorization type for use +// with apply. +type KubeconfigAuthorizationApplyConfiguration struct { + ClusterRoleBindings *KubeconfigClusterRoleBindingsApplyConfiguration `json:"clusterRoleBindings,omitempty"` +} + +// KubeconfigAuthorizationApplyConfiguration constructs a declarative configuration of the KubeconfigAuthorization type for use with +// apply. +func KubeconfigAuthorization() *KubeconfigAuthorizationApplyConfiguration { + return &KubeconfigAuthorizationApplyConfiguration{} +} + +// WithClusterRoleBindings sets the ClusterRoleBindings field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterRoleBindings field is set to the value of the last call. +func (b *KubeconfigAuthorizationApplyConfiguration) WithClusterRoleBindings(value *KubeconfigClusterRoleBindingsApplyConfiguration) *KubeconfigAuthorizationApplyConfiguration { + b.ClusterRoleBindings = value + return b +} diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigclusterrolebindings.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigclusterrolebindings.go new file mode 100644 index 0000000..a8da550 --- /dev/null +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigclusterrolebindings.go @@ -0,0 +1,50 @@ +/* +Copyright 2024 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// KubeconfigClusterRoleBindingsApplyConfiguration represents a declarative configuration of the KubeconfigClusterRoleBindings type for use +// with apply. +type KubeconfigClusterRoleBindingsApplyConfiguration struct { + WorkspacePath *string `json:"workspacePath,omitempty"` + ClusterRoles []string `json:"clusterRoles,omitempty"` +} + +// KubeconfigClusterRoleBindingsApplyConfiguration constructs a declarative configuration of the KubeconfigClusterRoleBindings type for use with +// apply. +func KubeconfigClusterRoleBindings() *KubeconfigClusterRoleBindingsApplyConfiguration { + return &KubeconfigClusterRoleBindingsApplyConfiguration{} +} + +// WithWorkspacePath sets the WorkspacePath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WorkspacePath field is set to the value of the last call. +func (b *KubeconfigClusterRoleBindingsApplyConfiguration) WithWorkspacePath(value string) *KubeconfigClusterRoleBindingsApplyConfiguration { + b.WorkspacePath = &value + return b +} + +// WithClusterRoles adds the given value to the ClusterRoles field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ClusterRoles field. 
+func (b *KubeconfigClusterRoleBindingsApplyConfiguration) WithClusterRoles(values ...string) *KubeconfigClusterRoleBindingsApplyConfiguration { + for i := range values { + b.ClusterRoles = append(b.ClusterRoles, values[i]) + } + return b +} diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go index 4830a5c..9503933 100644 --- a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go @@ -26,12 +26,13 @@ import ( // KubeconfigSpecApplyConfiguration represents a declarative configuration of the KubeconfigSpec type for use // with apply. type KubeconfigSpecApplyConfiguration struct { - Target *KubeconfigTargetApplyConfiguration `json:"target,omitempty"` - Username *string `json:"username,omitempty"` - Groups []string `json:"groups,omitempty"` - Validity *v1.Duration `json:"validity,omitempty"` - SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` - CertificateTemplate *CertificateTemplateApplyConfiguration `json:"certificateTemplate,omitempty"` + Target *KubeconfigTargetApplyConfiguration `json:"target,omitempty"` + Username *string `json:"username,omitempty"` + Groups []string `json:"groups,omitempty"` + Validity *v1.Duration `json:"validity,omitempty"` + SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` + CertificateTemplate *CertificateTemplateApplyConfiguration `json:"certificateTemplate,omitempty"` + Authorization *KubeconfigAuthorizationApplyConfiguration `json:"authorization,omitempty"` } // KubeconfigSpecApplyConfiguration constructs a declarative configuration of the KubeconfigSpec type for use with @@ -89,3 +90,11 @@ func (b *KubeconfigSpecApplyConfiguration) WithCertificateTemplate(value *Certif b.CertificateTemplate = value return b } + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that 
objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *KubeconfigSpecApplyConfiguration) WithAuthorization(value *KubeconfigAuthorizationApplyConfiguration) *KubeconfigSpecApplyConfiguration { + b.Authorization = value + return b +} diff --git a/sdk/applyconfiguration/utils.go b/sdk/applyconfiguration/utils.go index 117713b..a4e35ca 100644 --- a/sdk/applyconfiguration/utils.go +++ b/sdk/applyconfiguration/utils.go @@ -87,6 +87,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &operatorv1alpha1.ImageSpecApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("Kubeconfig"): return &operatorv1alpha1.KubeconfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigAuthorization"): + return &operatorv1alpha1.KubeconfigAuthorizationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigClusterRoleBindings"): + return &operatorv1alpha1.KubeconfigClusterRoleBindingsApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigSpec"): return &operatorv1alpha1.KubeconfigSpecApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigTarget"):