
Merge pull request #13 from loft-sh/misc-updates-fixes
Dependency updates + fixes + improvements
FabianKramm authored Jul 1, 2022
2 parents b3c7f51 + 29d68e5 commit 9f0a7ce
Showing 879 changed files with 59,767 additions and 12,805 deletions.
4 changes: 2 additions & 2 deletions .vscode/launch.json
@@ -5,11 +5,11 @@
"version": "0.2.0",
"configurations": [
{
"name": "Debug (localhost:2346)",
"name": "Debug (localhost:2348)",
"type": "go",
"request": "attach",
"mode": "remote",
"port": 2346,
"port": 2348,
"host": "localhost",
"substitutePath": [
{
2 changes: 1 addition & 1 deletion Makefile
@@ -46,7 +46,7 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust

.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
$(CONTROLLER_GEN) object paths="./..."

.PHONY: fmt
fmt: ## Run go fmt against code.
4 changes: 2 additions & 2 deletions README.md
@@ -53,7 +53,7 @@ clusterctl generate cluster ${CLUSTER_NAME} \

Now we just need to wait until vcluster custom resource reports ready status:
```shell
kubectl wait --for=condition=ready vcluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
kubectl wait --for=condition=ready vcluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME --timeout=300s
```
At this point the cluster is ready to be used. Please refer to the next chapter to get the credentials.

@@ -109,6 +109,6 @@ cat templates/cluster-template.yaml | ./bin/envsubst | kubectl apply -n ${CLUSTE

Now we just need to wait until VCluster custom resource reports ready status:
```shell
kubectl wait --for=condition=ready vcluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
kubectl wait --for=condition=ready vcluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME --timeout=300s
```
At this point the cluster is ready to be used. Please refer to "How to connect to your vcluster" chapter above to get the credentials.
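The credentials mentioned above are stored following the Cluster API convention: a Secret named `<cluster-name>-kubeconfig` in the cluster's namespace (see the `syncVClusterKubeconfig` changes further down). A minimal retrieval sketch, assuming the CAPI-standard `value` data key:
```shell
# Sketch: pull the kubeconfig written by the provider and use it.
# Assumes the CAPI-standard secret name "<name>-kubeconfig" and data key "value".
kubectl get secret ${CLUSTER_NAME}-kubeconfig -n ${CLUSTER_NAMESPACE} \
  -o jsonpath='{.data.value}' | base64 -d > ./vcluster-kubeconfig.yaml
kubectl --kubeconfig ./vcluster-kubeconfig.yaml get namespaces
```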
4 changes: 4 additions & 0 deletions api/v1alpha1/vcluster_types.go
@@ -51,6 +51,10 @@ type VClusterStatus struct {
// +optional
Ready bool `json:"ready"`

// Initialized defines if the virtual cluster control plane was initialized.
// +optional
Initialized bool `json:"initialized"`

// Phase describes the current phase the virtual cluster is in
// +optional
Phase VirtualClusterPhase `json:"phase,omitempty"`
16 changes: 0 additions & 16 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

@@ -124,6 +124,10 @@ spec:
- type
type: object
type: array
initialized:
description: Initialized defines if the virtual cluster control plane
was initialized.
type: boolean
message:
description: Message describes the reason in human readable form why
the cluster is in the current phase
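With the new `initialized` field exposed by the CRD, the status of a provisioned virtual cluster can be inspected directly; a quick sketch using the field names from the hunk above:
```shell
# Sketch: show the new initialized flag next to the existing ready flag.
kubectl get vcluster ${CLUSTER_NAME} -n ${CLUSTER_NAMESPACE} \
  -o jsonpath='initialized={.status.initialized} ready={.status.ready}{"\n"}'
```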
8 changes: 4 additions & 4 deletions config/manager/manager.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
control-plane: cluster-api-provider-vcluster-controller-manager
name: system
---
apiVersion: apps/v1
@@ -11,18 +11,18 @@ metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
control-plane: cluster-api-provider-vcluster-controller-manager
spec:
selector:
matchLabels:
control-plane: controller-manager
control-plane: cluster-api-provider-vcluster-controller-manager
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: controller-manager
control-plane: cluster-api-provider-vcluster-controller-manager
spec:
containers:
- command:
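The `control-plane` label value is now provider-specific, presumably so the manager Deployment no longer collides with other Cluster API providers that also label their controllers `control-plane: controller-manager`. A sketch of selecting the pod with the new label (namespace taken from devspace.yaml below):
```shell
kubectl get pods -n cluster-api-provider-vcluster-system \
  -l control-plane=cluster-api-provider-vcluster-controller-manager
```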
104 changes: 69 additions & 35 deletions controllers/vcluster_controller.go
@@ -19,6 +19,8 @@ package controllers
import (
"context"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
@@ -35,6 +37,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -45,7 +48,6 @@ import (
v1alpha1 "github.com/loft-sh/cluster-api-provider-vcluster/api/v1alpha1"
"github.com/loft-sh/cluster-api-provider-vcluster/pkg/constants"
"github.com/loft-sh/cluster-api-provider-vcluster/pkg/helm"
"github.com/loft-sh/cluster-api-provider-vcluster/pkg/util/cidrdiscovery"
"github.com/loft-sh/cluster-api-provider-vcluster/pkg/util/conditions"
"github.com/loft-sh/cluster-api-provider-vcluster/pkg/util/kubeconfighelper"
"github.com/loft-sh/cluster-api-provider-vcluster/pkg/util/patch"
@@ -148,7 +150,7 @@ func (r *VClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_

defer func() {
// Always reconcile the Status.Phase field.
r.reconcilePhase(ctx, vCluster)
r.reconcilePhase(vCluster)

// Always attempt to Patch the Cluster object and status after each reconciliation.
// Patch ObservedGeneration only if the reconciliation completed successfully
@@ -166,25 +168,27 @@ if err != nil {
if err != nil {
r.Log.Infof("error during virtual cluster deploy %s/%s: %v", vCluster.Namespace, vCluster.Name, err)
conditions.MarkFalse(vCluster, v1alpha1.HelmChartDeployedCondition, "HelmDeployFailed", v1alpha1.ConditionSeverityError, "%v", err)
return ctrl.Result{}, err
return ctrl.Result{RequeueAfter: time.Second * 5}, err
}

// check if vcluster is reachable and sync the kubeconfig Secret
t := time.Now()
err = r.syncVClusterKubeconfig(ctx, vCluster)
r.Log.Debugf("%s/%s: ready check took: %v", vCluster.Namespace, vCluster.Name, time.Since(t))
// check if vcluster is initialized and sync the kubeconfig Secret
restConfig, err := r.syncVClusterKubeconfig(ctx, vCluster)
if err != nil {
r.Log.Debugf("vcluster %s/%s is not ready: %v", vCluster.Namespace, vCluster.Name, err)
conditions.MarkFalse(vCluster, v1alpha1.KubeconfigReadyCondition, "CheckFailed", v1alpha1.ConditionSeverityWarning, "%v", err)
return ctrl.Result{RequeueAfter: time.Second * 5}, nil
}

return ctrl.Result{}, nil
}
vCluster.Status.Ready, err = r.checkReadyz(vCluster, restConfig)
if err != nil || !vCluster.Status.Ready {
r.Log.Debugf("readiness check failed: %v", err)
return ctrl.Result{RequeueAfter: time.Second * 5}, nil
}

func (r *VClusterReconciler) reconcilePhase(_ context.Context, vCluster *v1alpha1.VCluster) {
vCluster.Status.Ready = conditions.IsTrue(vCluster, v1alpha1.KubeconfigReadyCondition)
return ctrl.Result{RequeueAfter: time.Minute}, nil
}

func (r *VClusterReconciler) reconcilePhase(vCluster *v1alpha1.VCluster) {
if vCluster.Status.Phase != v1alpha1.VirtualClusterPending {
vCluster.Status.Phase = v1alpha1.VirtualClusterPending
}
@@ -214,12 +218,6 @@ func (r *VClusterReconciler) redeployIfNeeded(ctx context.Context, vCluster *v1a

r.Log.Debugf("upgrade virtual cluster helm chart %s/%s", vCluster.Namespace, vCluster.Name)

// look up CIDR
cidr, err := cidrdiscovery.NewCIDRLookup(r.Client).GetServiceCIDR(ctx, vCluster.Namespace)
if err != nil {
return fmt.Errorf("get service cidr: %v", err)
}

var chartRepo string
if vCluster.Spec.HelmRelease != nil {
chartRepo = vCluster.Spec.HelmRelease.Chart.Repo
@@ -274,9 +272,8 @@ }
}

//TODO: if .spec.controlPlaneEndpoint.Host is set it would be nice to pass it as --tls-san flag of syncer
values, err = vclustervalues.NewValuesMerger(
values, err := vclustervalues.NewValuesMerger(
kVersion,
cidr,
).Merge(&v1alpha1.VirtualClusterHelmRelease{
Chart: v1alpha1.VirtualClusterHelmChart{
Name: chartName,
@@ -322,20 +319,20 @@ func (r *VClusterReconciler) redeployIfNeeded(ctx context.Context, vCluster *v1a
return nil
}

func (r *VClusterReconciler) syncVClusterKubeconfig(ctx context.Context, vCluster *v1alpha1.VCluster) error {
func (r *VClusterReconciler) syncVClusterKubeconfig(ctx context.Context, vCluster *v1alpha1.VCluster) (*rest.Config, error) {
credentials, err := GetVClusterCredentials(ctx, r.Client, vCluster)
if err != nil {
return err
return nil, err
}

restConfig, err := kubeconfighelper.NewVClusterClientConfig(vCluster.Name, vCluster.Namespace, "", credentials.ClientCert, credentials.ClientKey)
if err != nil {
return err
return nil, err
}

kubeClient, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return err
return nil, err
}

ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*10)
@@ -345,19 +342,22 @@ func (r *VClusterReconciler) syncVClusterKubeconfig(ctx context.Context, vCluste
if !conditions.IsTrue(vCluster, v1alpha1.ControlPlaneInitializedCondition) {
_, err = kubeClient.CoreV1().ServiceAccounts("default").Get(ctxTimeout, "default", metav1.GetOptions{})
if err != nil {
return err
return nil, err
}

conditions.MarkTrue(vCluster, v1alpha1.ControlPlaneInitializedCondition)
}
// setting .Status.Initialized outside of the condition above to ensure
// that it is set on old CRs, which were missing this field, as well
vCluster.Status.Initialized = true

// write kubeconfig to the vcluster.Name+"-kubeconfig" Secret as expected by CAPI convention
kubeConfig, err := GetVClusterKubeConfig(ctx, r.Client, vCluster)
if err != nil {
return fmt.Errorf("can not retrieve kubeconfig: %v", err)
return nil, fmt.Errorf("can not retrieve kubeconfig: %v", err)
}
if len(kubeConfig.Clusters) != 1 {
return fmt.Errorf("unexpected kube config")
return nil, fmt.Errorf("unexpected kube config")
}

// If vcluster.spec.controlPlaneEndpoint.Host is not set, try to autodiscover it from
@@ -366,9 +366,13 @@ func (r *VClusterReconciler) syncVClusterKubeconfig(ctx context.Context, vCluste
if controlPlaneHost == "" {
controlPlaneHost, err = DiscoverHostFromService(ctx, r.Client, vCluster)
if err != nil {
return err
return nil, err
}
// write the discovered host back into vCluster CR
vCluster.Spec.ControlPlaneEndpoint.Host = controlPlaneHost
if vCluster.Spec.ControlPlaneEndpoint.Port == 0 {
vCluster.Spec.ControlPlaneEndpoint.Port = DefaultControlPlanePort
}
//TODO write back vcluster.spec.controlPlaneEndpoint.Host
}

for k := range kubeConfig.Clusters {
@@ -387,23 +391,50 @@ func (r *VClusterReconciler) syncVClusterKubeconfig(ctx context.Context, vCluste
}
outKubeConfig, err := clientcmd.Write(*kubeConfig)
if err != nil {
return err
return nil, err
}

kubeSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-kubeconfig", vCluster.Name), Namespace: vCluster.Namespace}}
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, kubeSecret, func() error {
_, err = controllerutil.CreateOrPatch(ctx, r.Client, kubeSecret, func() error {
if kubeSecret.Data == nil {
kubeSecret.Data = make(map[string][]byte)
}
kubeSecret.Data[KubeconfigDataName] = outKubeConfig
return nil
})
if err != nil {
return fmt.Errorf("can not create a kubeconfig secret: %v", err)
return nil, fmt.Errorf("can not create a kubeconfig secret: %v", err)
}

conditions.MarkTrue(vCluster, v1alpha1.KubeconfigReadyCondition)
return nil
return restConfig, nil
}

func (r *VClusterReconciler) checkReadyz(vCluster *v1alpha1.VCluster, restConfig *rest.Config) (bool, error) {
t := time.Now()
transport, err := rest.TransportFor(restConfig)
if err != nil {
return false, err
}
client := http.Client{
Timeout: 10 * time.Second,
Transport: transport,
}
resp, err := client.Get(fmt.Sprintf("https://%s:%d/readyz", vCluster.Spec.ControlPlaneEndpoint.Host, vCluster.Spec.ControlPlaneEndpoint.Port))
r.Log.Debugf("%s/%s: ready check took: %v", vCluster.Namespace, vCluster.Name, time.Since(t))
if err != nil {
return false, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return false, err
}
if string(body) != "ok" {
return false, nil
}

return true, nil
}

func DiscoverHostFromService(ctx context.Context, client client.Client, vCluster *v1alpha1.VCluster) (string, error) {
@@ -412,9 +443,9 @@ func DiscoverHostFromService(ctx context.Context, client client.Client, vCluster
service := &corev1.Service{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: vCluster.Namespace, Name: vCluster.Name}, service)
if err != nil {
// if kerrors.IsNotFound(err) {
// return true, nil
// }
if kerrors.IsNotFound(err) {
return true, nil
}

return false, err
}
@@ -444,6 +475,9 @@ func DiscoverHostFromService(ctx context.Context, client client.Client, vCluster
return "", fmt.Errorf("can not get vcluster service: %v", err)
}

if host == "" {
host = fmt.Sprintf("%s.%s.svc", vCluster.Name, vCluster.Namespace)
}
return host, nil
}

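The new `checkReadyz` helper issues an HTTP GET against `/readyz` on the control-plane endpoint and expects the literal body `ok`. Roughly the same check can be done by hand with the kubeconfig retrieved earlier (a sketch, not part of this commit):
```shell
# Sketch: manual equivalent of the controller's readiness probe.
kubectl --kubeconfig ./vcluster-kubeconfig.yaml get --raw='/readyz'
# prints "ok" once the virtual cluster's API server is ready
```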
3 changes: 2 additions & 1 deletion devspace.yaml
@@ -19,6 +19,7 @@ deployments:
kustomize: true
manifests:
- config/default/
- config/crd/
dev:
provider:
namespace: cluster-api-provider-vcluster-system
@@ -27,7 +28,7 @@ dev:
terminal:
command: "./devspace_start.sh"
ports:
- port: 2346:2345
- port: 2348:2345
sync:
- path: ./
excludePaths:
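The devspace.yaml and launch.json changes belong together: devspace now forwards local port 2348 to port 2345 inside the dev container, and the updated VS Code configuration attaches to localhost:2348. A sketch of serving the manager under a headless delve session inside the container (standard delve flags; the exact invocation is an assumption, not part of this commit):
```shell
# Sketch: run the controller under delve so the remote-attach config can connect.
dlv debug --headless --listen=:2345 --api-version=2 --accept-multiclient
```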