From c81dce999751f8db7515d8531d9d5532955d7f25 Mon Sep 17 00:00:00 2001
From: Simon Bein
Date: Mon, 19 May 2025 13:29:36 +0200
Subject: [PATCH 1/5] include hierarchy and overall workflow for initializer docs

On-behalf-of: SAP
Signed-off-by: Simon Bein
---
 .../workspaces/workspace-initialization.md | 62 ++++++++++---------
 1 file changed, 32 insertions(+), 30 deletions(-)

diff --git a/docs/content/concepts/workspaces/workspace-initialization.md b/docs/content/concepts/workspaces/workspace-initialization.md
index c27ef8fc65e..b7dd05a8446 100644
--- a/docs/content/concepts/workspaces/workspace-initialization.md
+++ b/docs/content/concepts/workspaces/workspace-initialization.md
@@ -8,7 +8,7 @@ Initializers are used to customize workspaces and bootstrap required resources u

### Defining Initializers in WorkspaceTypes

-A `WorkspaceType` can specify an initializer using the `initializer` field. Here is an example of a `WorkspaceType` with an initializer.
+A `WorkspaceType` can request an initializer by setting the boolean `initializer` field. Here is an example of a `WorkspaceType` with an initializer.

```yaml
apiVersion: tenancy.kcp.io/v1alpha1
kind: WorkspaceType
metadata:
  name: example
spec:
  initializer: true
  defaultAPIBindings:
    - export: tenancy.kcp.io
      path: root
```

+Each initializer has a unique name, which is generated automatically by joining the path of the workspace holding the `WorkspaceType` and the `WorkspaceType` name with a `:`. So for example, if you were to apply the aforementioned WorkspaceType on the root workspace, your initializer would be called `root:example`.
+
+Since `WorkspaceType.spec.initializer` is a boolean field, each WorkspaceType comes with a single initializer by default. However, each WorkspaceType inherits the initializers of its parent workspaces. As a result, it is possible to have multiple initializers on a WorkspaceType, but you will need to nest them.
+Here is an example:
+
+1. In the `root` workspace, create a new WorkspaceType called `parent`. You will receive a `root:parent` initializer.
+2. In the newly created `parent` workspace, create a new WorkspaceType `child`. You will receive a `root:parent:child` initializer.
+3. Whenever a new workspace is created in the child workspace, it will receive both the `root:parent` and the `root:parent:child` initializers.
+
### Enforcing Permissions for Initializers

The non-root user must have the `initialize` verb on the `WorkspaceType` that the initializer is for. This ensures that only authorized users can perform initialization actions through the virtual workspace endpoint. Here is an example of the `ClusterRole`.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: initialize-example-workspacetype
rules:
- apiGroups: ["tenancy.kcp.io"]
  resources: ["workspacetypes"]
  resourceNames: ["example"]
  verbs: ["initialize"]
```
+
You can then bind this role to a user or a group.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: initialize-example-workspacetype-binding
subjects:
- kind: User
  name: user-1
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: initialize-example-workspacetype
  apiGroup: rbac.authorization.k8s.io
```

-## initializingworkspaces Virtual Workspace
+## Writing Custom Initialization Controllers

-As a service provider, you can use the `initializingworkspaces` virtual workspace to manage workspace resources in the initializing phase. This virtual workspace allows you to fetch `LogicalCluster` objects that are in the initializing phase and request initialization by a specific controller.
+### Responsibilities Of Custom Intitialization Controllers

-This Virtual Workspace can fetch `LogicalCluster` either by specific its name or using wildcard.
+Custom Initialization Controllers are responsible for handling initialization logic for custom WorkspaceTypes. They interact with kcp by:

-### Endpoint URL path
+1. Watching for the creation of new LogicalClusters (the backing object behind Workspaces) with the corresponding initializer on them
+2. Running any custom initialization logic
+3. 
Removing the corresponding initializer from the `.status.initializers` list of the LogicalCluster after initialization logic has successfully finished -`initializingworkspaces` Virtual Workspace provide a virtual api-server to access workspaces that are initializing with the specific initializer. These URLs are published in the status of WorkspaceType object. +In order to simplify these processes, kcp provides the `initializingworkspaces` virtual workspace. +### The `initializingworkspaces` Virtual Workspace -```yaml - virtualWorkspaces: - - url: https://:6443/services/initializingworkspaces/ -``` +As a service provider, you can use the `initializingworkspaces` virtual workspace to manage workspace resources in the initializing phase. This virtual workspace allows you to fetch `LogicalCluster` objects that are in the initializing phase and request initialization by a specific controller. -This is an example URL path for accessing logical cluster apis for a specific initializer in a `initializingworkspaces` virtual workspace. +You can retrieve the url of a Virtual Workspace directly from the `.status.virtualWorkspaces` field of the corresponding WorkspaceType. Returning to our previous example using a custom WorkspaceType called "example", you will receive the following output: -```yaml -/services/initializingworkspaces//clusters/*/apis/core.kcp.io/v1alpha1/logicalclusters -``` - -You can also use `LogicalCluster` name for the direct view, allowing to manage all resources within that logical cluster. +```sh +$ kubectl get workspacetype example -o yaml -```yaml -/services/initializingworkspaces//clusters//apis/core.kcp.io/v1alpha1/logicalclusters +... +status: + virtualWorkspaces: + - url: https:///services/initializingworkspaces/root:example ``` -### Example workflow - -* Add your custom WorkspaceType to the platform with an initializer. +You can use this url to construct a kubeconfig for your controller. To do so, use the url directly as the `cluster.server` in your kubeconfig and provide a user with sufficient permissions (see [Enforcing Permissions for Initializers](#enforcing-permissions-for-initializers)) -* Create a workspace with the necessary warrants and scopes. The workspace will stay in the initializing state as the initializer is present. - -* Use a controller to watch your initializing workspaces, you can interact with the workspace through the virtual workspace endpoint: - -```yaml -/services/initializingworkspaces/foo/clusters/*/apis/core.kcp.io/v1alpha1/logicalclusters -``` +### Code Sample -* Once you get the object, you need to initialize the workspace with its related resources, using the same endpoint +* It is important to use the kcp-dev controller runtime fork, as regular controller runtime is not able to deal with all logical clusters being name "cluster" +* LogicalClusters cannot updated using update api, but must be updated using patch api -* Once the initialization is complete, use the same endpoint to remove the initializer from the workspace. 
+// TODO paste in sample once it is finished From accedf19be435b881e1fce4577deb270b2b12650 Mon Sep 17 00:00:00 2001 From: Simon Bein Date: Tue, 27 May 2025 14:01:51 +0200 Subject: [PATCH 2/5] clarify function arguments for CommitFunc On-behalf-of: SAP Signed-off-by: Simon Bein --- pkg/reconciler/committer/committer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/reconciler/committer/committer.go b/pkg/reconciler/committer/committer.go index 8dee1aa0925..4055f9d95e5 100644 --- a/pkg/reconciler/committer/committer.go +++ b/pkg/reconciler/committer/committer.go @@ -51,7 +51,7 @@ type Patcher[R runtime.Object] interface { } // CommitFunc is an alias to clean up type declarations. -type CommitFunc[Sp any, St any] func(context.Context, *Resource[Sp, St], *Resource[Sp, St]) error +type CommitFunc[Sp any, St any] func(_ context.Context, old *Resource[Sp, St], new *Resource[Sp, St]) error // NewCommitter returns a function that can patch instances of R based on meta, // spec or status changes using a cluster-aware patcher. From 4c9ac9b66e4f7b3bccc02bd86a28b06a7d003fdd Mon Sep 17 00:00:00 2001 From: Simon Bein Date: Tue, 27 May 2025 14:42:53 +0200 Subject: [PATCH 3/5] code-sample for workspace initializer On-behalf-of: SAP Signed-off-by: Simon Bein --- .../workspaces/workspace-initialization.md | 157 +++++++++++++++++- 1 file changed, 153 insertions(+), 4 deletions(-) diff --git a/docs/content/concepts/workspaces/workspace-initialization.md b/docs/content/concepts/workspaces/workspace-initialization.md index b7dd05a8446..bba02d30973 100644 --- a/docs/content/concepts/workspaces/workspace-initialization.md +++ b/docs/content/concepts/workspaces/workspace-initialization.md @@ -95,7 +95,156 @@ You can use this url to construct a kubeconfig for your controller. 
To do so, us ### Code Sample -* It is important to use the kcp-dev controller runtime fork, as regular controller runtime is not able to deal with all logical clusters being name "cluster" -* LogicalClusters cannot updated using update api, but must be updated using patch api - -// TODO paste in sample once it is finished +When writing a custom initializer, the following needs to be taken into account: + +* You need to use the kcp-dev controller-runtime fork, as regular controller-runtime is not able to work as under the hood all LogicalClusters have the sam name +* You need to update LogicalClusters using patches; They cannot be updated using the update api + +Keeping this in mind, you can use the following example as a starting point for your intitialization controller + +=== "main.go" + + ```Go + package main + + import ( + "context" + "fmt" + "log/slog" + "os" + "slices" + "strings" + + "github.com/go-logr/logr" + kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" + "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/kcp" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + ) + + type Reconciler struct { + Client client.Client + Log logr.Logger + InitializerName kcpcorev1alpha1.LogicalClusterInitializer + } + + func main() { + if err := execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } + } + + func execute() error { + kubeconfigpath := "" + + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath) + if err != nil { + return err + } + + logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil)) + ctrl.SetLogger(logger) + + mgr, err := kcp.NewClusterAwareManager(config, manager.Options{ + Logger: logger, + }) + if err != nil { + return err + } + if err := kcpcorev1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + return err + } + + // since the initializers name is is the last part of the hostname, we can take it from there + initializerName := config.Host[strings.LastIndex(config.Host, "/")+1:] + + r := Reconciler{ + Client: mgr.GetClient(), + Log: mgr.GetLogger().WithName("initializer-controller"), + InitializerName: kcpcorev1alpha1.LogicalClusterInitializer(initializerName), + } + + if err := r.SetupWithManager(mgr); err != nil { + return err + } + mgr.GetLogger().Info("Setup complete") + + if err := mgr.Start(context.Background()); err != nil { + return err + } + + return nil + } + + func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&kcpcorev1alpha1.LogicalCluster{}). 
+ // we need to use kcp.WithClusterInContext here to target the correct logical clusters during reconciliation + Complete(kcp.WithClusterInContext(r)) + } + + func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := r.Log.WithValues("clustername", req.ClusterName) + log.Info("Reconciling") + + lc := &kcpcorev1alpha1.LogicalCluster{} + if err := r.Client.Get(ctx, req.NamespacedName, lc); err != nil { + return reconcile.Result{}, err + } + + // check if your initializer is still set on the logicalcluster + if slices.Contains(lc.Status.Initializers, r.InitializerName) { + + log.Info("Starting to initialize cluster") + // your logic here to initialize a Workspace + + // after your initialization is done, don't forget to remove your initializer + // Since LogicalCluster objects cannot be directly updated, we need to create a patch. + patch := client.MergeFrom(lc.DeepCopy()) + lc.Status.Initializers = initialization.EnsureInitializerAbsent(r.InitializerName, lc.Status.Initializers) + if err := r.Client.Status().Patch(ctx, lc, patch); err != nil { + return reconcile.Result{}, err + } + } + + return reconcile.Result{}, nil + } + ``` + +=== "kubeconfig" + + ```yaml + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: + # obtain the server url from the status of your WorkspaceType + server: "" + name: finalizer + contexts: + - context: + cluster: finalizer + user: + name: finalizer + current-context: finalizer + kind: Config + preferences: {} + users: + - name: + user: + token: + ``` + +=== "go.mod" + + ```Go + ... + // replace upstream controller-runtime with kcp cluster aware fork + replace sigs.k8s.io/controller-runtime v0.19.7 => github.com/kcp-dev/controller-runtime v0.19.0-kcp.1 + ... + ``` From fb0b655f9a0f0abeaa21be97e13b7f2ed3ee8a62 Mon Sep 17 00:00:00 2001 From: Simon Bein Date: Thu, 7 Aug 2025 19:29:44 +0200 Subject: [PATCH 4/5] switch to initializingworkspace multicluster-provider On-behalf-of: SAP Signed-off-by: Simon Bein --- .../workspaces/workspace-initialization.md | 230 +++++++++--------- 1 file changed, 115 insertions(+), 115 deletions(-) diff --git a/docs/content/concepts/workspaces/workspace-initialization.md b/docs/content/concepts/workspaces/workspace-initialization.md index bba02d30973..c46a2f84438 100644 --- a/docs/content/concepts/workspaces/workspace-initialization.md +++ b/docs/content/concepts/workspaces/workspace-initialization.md @@ -97,11 +97,78 @@ You can use this url to construct a kubeconfig for your controller. 
To do so, us When writing a custom initializer, the following needs to be taken into account: -* You need to use the kcp-dev controller-runtime fork, as regular controller-runtime is not able to work as under the hood all LogicalClusters have the sam name +* We strongly recommend to use the kcp [initializingworkspace multicluster-provider](github.com/kcp-dev/multicluster-provider) to build your custom initializer * You need to update LogicalClusters using patches; They cannot be updated using the update api Keeping this in mind, you can use the following example as a starting point for your intitialization controller +=== "reconcile.go" + + ```Go + package main + + import ( + "context" + "slices" + + "github.com/go-logr/logr" + kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" + ) + + type Reconciler struct { + Log logr.Logger + InitializerName kcpcorev1alpha1.LogicalClusterInitializer + ClusterGetter func(context.Context, string) (cluster.Cluster, error) + } + + func (r *Reconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (reconcile.Result, error) { + log := r.Log.WithValues("clustername", req.ClusterName) + log.Info("Reconciling") + + // create a client scoped to the logical cluster the request came from + cluster, err := r.ClusterGetter(ctx, req.ClusterName) + if err != nil { + return reconcile.Result{}, err + } + client := cluster.GetClient() + + lc := &kcpcorev1alpha1.LogicalCluster{} + if err := client.Get(ctx, req.NamespacedName, lc); err != nil { + return reconcile.Result{}, err + } + + // check if your initializer is still set on the logicalcluster + if slices.Contains(lc.Status.Initializers, r.InitializerName) { + + // your logic to initialize a Workspace goes here + log.Info("Starting to initialize cluster") + + // after your initialization is done, don't forget to remove your initializer. + // You will need to use patch, to update the LogicalCluster + patch := ctrlclient.MergeFrom(lc.DeepCopy()) + lc.Status.Initializers = initialization.EnsureInitializerAbsent(r.InitializerName, lc.Status.Initializers) + if err := client.Status().Patch(ctx, lc, patch); err != nil { + return reconcile.Result{}, err + } + } + + return reconcile.Result{}, nil + } + + func (r *Reconciler) SetupWithManager(mgr mcmanager.Manager) error { + return mcbuilder.ControllerManagedBy(mgr). + For(&kcpcorev1alpha1.LogicalCluster{}). 
+ Complete(r) + } + ``` + === "main.go" ```Go @@ -112,139 +179,72 @@ Keeping this in mind, you can use the following example as a starting point for "fmt" "log/slog" "os" - "slices" "strings" - + "github.com/go-logr/logr" kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" + "github.com/kcp-dev/multicluster-provider/initializingworkspaces" + "golang.org/x/sync/errgroup" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/kcp" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - ) - - type Reconciler struct { - Client client.Client - Log logr.Logger - InitializerName kcpcorev1alpha1.LogicalClusterInitializer - } - - func main() { + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + ) + + // glue and setup code + func main() { if err := execute(); err != nil { - fmt.Println(err) - os.Exit(1) + fmt.Println(err) + os.Exit(1) } - } - - func execute() error { - kubeconfigpath := "" - + } + func execute() error { + // your kubeconfig here + kubeconfigpath := "" + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath) if err != nil { - return err + return err } - + + // since the initializers name is is the last part of the hostname, we can take it from there + initializerName := config.Host[strings.LastIndex(config.Host, "/")+1:] + + provider, err := initializingworkspaces.New(config, initializingworkspaces.Options{InitializerName: initializerName}) + if err != nil { + return err + } + logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil)) ctrl.SetLogger(logger) - - mgr, err := kcp.NewClusterAwareManager(config, manager.Options{ - Logger: logger, - }) + + mgr, err := mcmanager.New(config, provider, manager.Options{Logger: logger}) if err != nil { - return err + return err } - if err := kcpcorev1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - return err + + // add the logicalcluster scheme + if err := kcpcorev1alpha1.AddToScheme(scheme.Scheme); err != nil { + return err } - - // since the initializers name is is the last part of the hostname, we can take it from there - initializerName := config.Host[strings.LastIndex(config.Host, "/")+1:] - + r := Reconciler{ - Client: mgr.GetClient(), - Log: mgr.GetLogger().WithName("initializer-controller"), - InitializerName: kcpcorev1alpha1.LogicalClusterInitializer(initializerName), + Log: mgr.GetLogger().WithName("initializer-controller"), + InitializerName: kcpcorev1alpha1.LogicalClusterInitializer(initializerName), + ClusterGetter: mgr.GetCluster, } - + if err := r.SetupWithManager(mgr); err != nil { - return err + return err } mgr.GetLogger().Info("Setup complete") - - if err := mgr.Start(context.Background()); err != nil { - return err - } - - return nil - } - - func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&kcpcorev1alpha1.LogicalCluster{}). 
- // we need to use kcp.WithClusterInContext here to target the correct logical clusters during reconciliation - Complete(kcp.WithClusterInContext(r)) - } - - func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.Log.WithValues("clustername", req.ClusterName) - log.Info("Reconciling") - - lc := &kcpcorev1alpha1.LogicalCluster{} - if err := r.Client.Get(ctx, req.NamespacedName, lc); err != nil { - return reconcile.Result{}, err - } - - // check if your initializer is still set on the logicalcluster - if slices.Contains(lc.Status.Initializers, r.InitializerName) { - - log.Info("Starting to initialize cluster") - // your logic here to initialize a Workspace - - // after your initialization is done, don't forget to remove your initializer - // Since LogicalCluster objects cannot be directly updated, we need to create a patch. - patch := client.MergeFrom(lc.DeepCopy()) - lc.Status.Initializers = initialization.EnsureInitializerAbsent(r.InitializerName, lc.Status.Initializers) - if err := r.Client.Status().Patch(ctx, lc, patch); err != nil { - return reconcile.Result{}, err - } - } - - return reconcile.Result{}, nil - } - ``` - -=== "kubeconfig" - - ```yaml - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: - # obtain the server url from the status of your WorkspaceType - server: "" - name: finalizer - contexts: - - context: - cluster: finalizer - user: - name: finalizer - current-context: finalizer - kind: Config - preferences: {} - users: - - name: - user: - token: - ``` - -=== "go.mod" - - ```Go - ... - // replace upstream controller-runtime with kcp cluster aware fork - replace sigs.k8s.io/controller-runtime v0.19.7 => github.com/kcp-dev/controller-runtime v0.19.0-kcp.1 - ... + + // start the provider and manager + g, ctx := errgroup.WithContext(context.Background()) + g.Go(func() error { return provider.Run(ctx, mgr) }) + g.Go(func() error { return mgr.Start(ctx) }) + + return g.Wait() + } ``` From c85829d508b427de66f9fc50affa8ded6aada29a Mon Sep 17 00:00:00 2001 From: Simon Bein Date: Thu, 23 Oct 2025 10:35:24 +0200 Subject: [PATCH 5/5] link to the official multicluster-provider example and fix hiearchy documentation On-behalf-of: SAP Signed-off-by: Simon Bein --- .../workspaces/workspace-initialization.md | 177 ++---------------- 1 file changed, 20 insertions(+), 157 deletions(-) diff --git a/docs/content/concepts/workspaces/workspace-initialization.md b/docs/content/concepts/workspaces/workspace-initialization.md index c46a2f84438..9eb6c041987 100644 --- a/docs/content/concepts/workspaces/workspace-initialization.md +++ b/docs/content/concepts/workspaces/workspace-initialization.md @@ -24,12 +24,22 @@ spec: Each initializer has a unique name, which gets automatically generated using `:`. So for example, if you were to apply the aforementioned WorkspaceType on the root workspace, your initializer would be called `root:example`. -Since `WorkspaceType.spec.initializer` is a boolean field, each WorkspaceType comes with a single initializer by default. However each WorkspaceType inherits the initializers of its parent workspaces. As a result, it is possible to have multiple initializers on a WorkspaceType, but you will need to nest them. -Here is a example: +Since `WorkspaceType.spec.initializers` is a boolean field, each WorkspaceType comes with a single initializer by default. However each WorkspaceType inherits the initializers of its parent WorkspaceType. 
As a result, it is possible to have multiple initializers on a WorkspaceType using [WorkspaceType Extension](../../concepts/workspaces/workspace-types.md#workspace-type-extensions-and-constraints) -1. In `root` workspace, create a new WorkspaceType called `parent`. You will receive a `root:parent` initializer -2. In the newly created `parent` workspace, create a new WorkspaceType `child`. You will receive a `root:parent:child` initializer -3. Whenever a new workspace is created in the child workspace, it will receive both the `root:parent` as well as the `root:parent:child` initializer +In the following example, `child` inherits the initializers of `parent`. As a result, child workspaces will have the `root:child` and `root:parent` initializers set. + +```yaml +apiVersion: tenancy.kcp.io/v1alpha1 +kind: WorkspaceType +metadata: + name: child +spec: + initializer: true + extend: + with: + - name: parent + path: root +``` ### Enforcing Permissions for Initializers @@ -66,7 +76,7 @@ roleRef: ## Writing Custom Initialization Controllers -### Responsibilities Of Custom Intitialization Controllers +### Responsibilities Of Custom Initialization Controllers Custom Initialization Controllers are responsible for handling initialization logic for custom WorkspaceTypes. They interact with kcp by: @@ -88,163 +98,16 @@ $ kubectl get workspacetype example -o yaml ... status: virtualWorkspaces: - - url: https:///services/initializingworkspaces/root:example + - url: https:///services/initializingworkspaces/root:example ``` -You can use this url to construct a kubeconfig for your controller. To do so, use the url directly as the `cluster.server` in your kubeconfig and provide a user with sufficient permissions (see [Enforcing Permissions for Initializers](#enforcing-permissions-for-initializers)) +You can use this url to construct a kubeconfig for your controller. 
To do so, use the url directly as the `cluster.server` in your kubeconfig and provide the subject with sufficient permissions (see [Enforcing Permissions for Initializers](#enforcing-permissions-for-initializers)) ### Code Sample When writing a custom initializer, the following needs to be taken into account: -* We strongly recommend to use the kcp [initializingworkspace multicluster-provider](github.com/kcp-dev/multicluster-provider) to build your custom initializer +* We strongly recommend to use the kcp [initializingworkspace multicluster-provider](https://github.com/kcp-dev/multicluster-provider) to build your custom initializer * You need to update LogicalClusters using patches; They cannot be updated using the update api -Keeping this in mind, you can use the following example as a starting point for your intitialization controller - -=== "reconcile.go" - - ```Go - package main - - import ( - "context" - "slices" - - "github.com/go-logr/logr" - kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder" - mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" - mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" - ) - - type Reconciler struct { - Log logr.Logger - InitializerName kcpcorev1alpha1.LogicalClusterInitializer - ClusterGetter func(context.Context, string) (cluster.Cluster, error) - } - - func (r *Reconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (reconcile.Result, error) { - log := r.Log.WithValues("clustername", req.ClusterName) - log.Info("Reconciling") - - // create a client scoped to the logical cluster the request came from - cluster, err := r.ClusterGetter(ctx, req.ClusterName) - if err != nil { - return reconcile.Result{}, err - } - client := cluster.GetClient() - - lc := &kcpcorev1alpha1.LogicalCluster{} - if err := client.Get(ctx, req.NamespacedName, lc); err != nil { - return reconcile.Result{}, err - } - - // check if your initializer is still set on the logicalcluster - if slices.Contains(lc.Status.Initializers, r.InitializerName) { - - // your logic to initialize a Workspace goes here - log.Info("Starting to initialize cluster") - - // after your initialization is done, don't forget to remove your initializer. - // You will need to use patch, to update the LogicalCluster - patch := ctrlclient.MergeFrom(lc.DeepCopy()) - lc.Status.Initializers = initialization.EnsureInitializerAbsent(r.InitializerName, lc.Status.Initializers) - if err := client.Status().Patch(ctx, lc, patch); err != nil { - return reconcile.Result{}, err - } - } - - return reconcile.Result{}, nil - } - - func (r *Reconciler) SetupWithManager(mgr mcmanager.Manager) error { - return mcbuilder.ControllerManagedBy(mgr). - For(&kcpcorev1alpha1.LogicalCluster{}). 
- Complete(r) - } - ``` - -=== "main.go" - - ```Go - package main - - import ( - "context" - "fmt" - "log/slog" - "os" - "strings" - - "github.com/go-logr/logr" - kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - "github.com/kcp-dev/multicluster-provider/initializingworkspaces" - "golang.org/x/sync/errgroup" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/clientcmd" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" - mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" - ) - - // glue and setup code - func main() { - if err := execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } - } - func execute() error { - // your kubeconfig here - kubeconfigpath := "" - - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath) - if err != nil { - return err - } - - // since the initializers name is is the last part of the hostname, we can take it from there - initializerName := config.Host[strings.LastIndex(config.Host, "/")+1:] - - provider, err := initializingworkspaces.New(config, initializingworkspaces.Options{InitializerName: initializerName}) - if err != nil { - return err - } - - logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil)) - ctrl.SetLogger(logger) - - mgr, err := mcmanager.New(config, provider, manager.Options{Logger: logger}) - if err != nil { - return err - } - - // add the logicalcluster scheme - if err := kcpcorev1alpha1.AddToScheme(scheme.Scheme); err != nil { - return err - } - - r := Reconciler{ - Log: mgr.GetLogger().WithName("initializer-controller"), - InitializerName: kcpcorev1alpha1.LogicalClusterInitializer(initializerName), - ClusterGetter: mgr.GetCluster, - } - - if err := r.SetupWithManager(mgr); err != nil { - return err - } - mgr.GetLogger().Info("Setup complete") - - // start the provider and manager - g, ctx := errgroup.WithContext(context.Background()) - g.Go(func() error { return provider.Run(ctx, mgr) }) - g.Go(func() error { return mgr.Start(ctx) }) - - return g.Wait() - } - ``` +Keeping this in mind, you can use the [multicluster-provider initializingworkspaces example](https://github.com/kcp-dev/multicluster-provider/tree/main/examples/initializingworkspaces) as a starting point for your initialization controller
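+
+As a minimal illustration of the patch-based removal step, a sketch like the following could be used (the `removeInitializer` helper is hypothetical and not part of the linked example; it only shows the status patch described above):
+
+```Go
+package example
+
+import (
+    "context"
+
+    kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
+    "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// removeInitializer drops the given initializer from the LogicalCluster status.
+// LogicalClusters cannot be modified through the update API, so a merge patch
+// computed against a deep copy of the original object is used instead.
+func removeInitializer(ctx context.Context, c client.Client, lc *kcpcorev1alpha1.LogicalCluster, name kcpcorev1alpha1.LogicalClusterInitializer) error {
+    patch := client.MergeFrom(lc.DeepCopy())
+    lc.Status.Initializers = initialization.EnsureInitializerAbsent(name, lc.Status.Initializers)
+    return c.Status().Patch(ctx, lc, patch)
+}
+```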