From 1b09a1334e149637ad4bcc5588f5fd9be268ec69 Mon Sep 17 00:00:00 2001
From: Claudio Lorina
Date: Wed, 5 Feb 2025 18:56:31 +0100
Subject: [PATCH] refactor(liqoctl)!: include network init in connect

This patch removes the `liqoctl network init` command and folds its
functionality into `connect`. This simplifies usage and removes a command
that is rarely useful on its own.
---
 cmd/liqoctl/cmd/network.go                    | 32 +------
 .../advanced/peering/inter-cluster-network.md | 90 ++++++++----------
 pkg/liqoctl/network/cluster.go                |  6 +-
 pkg/liqoctl/network/handler.go                | 91 ++++++++-----------
 pkg/liqoctl/peer/handler.go                   |  4 -
 5 files changed, 86 insertions(+), 137 deletions(-)

diff --git a/cmd/liqoctl/cmd/network.go b/cmd/liqoctl/cmd/network.go
index 4d8f6ff59e..0c8d6e2ad4 100644
--- a/cmd/liqoctl/cmd/network.go
+++ b/cmd/liqoctl/cmd/network.go
@@ -31,10 +31,6 @@ import (
 
 const liqoctlNetworkLongHelp = `Manage liqo networking.`
 
-const liqoctlNetworkInitLongHelp = `Initialize the liqo networking between two clusters.
-
-It generates all network configurations required to connect the two clusters.`
-
 const liqoctlNetworkResetLongHelp = `Tear down all liqo networking between two clusters.
 
 It disconnects the two clusters and remove network configurations generated with the *network init* command.`
@@ -44,9 +40,10 @@ const liqoctlNetworConnectLongHelp = `Connect two clusters using liqo networking
 
 This command creates the Gateways to connect the two clusters.
 Run this command after inizialiting the network using the *network init* command.`
 
-const liqoctlNetworkDisconnectLongHelp = `Disconnect two clusters.
+const liqoctlNetworkDisconnectLongHelp = `Disconnect two clusters keeping the network configuration.
 
-It deletes the Gateways, but keeps the network configurations generated with the *network init* command.`
+It deletes the Gateways, but keeps the network configurations generated by the *network connect* command.
+This is useful when a user wants to disconnect the clusters while keeping the same IP mapping.`
 
 func newNetworkCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
 	options := network.NewOptions(f)
@@ -85,7 +82,6 @@ func newNetworkCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
 
 	options.LocalFactory.Printer.CheckErr(cmd.RegisterFlagCompletionFunc("remote-liqo-namespace", completion.Namespaces(ctx, options.RemoteFactory, completion.NoLimit)))
 
-	cmd.AddCommand(newNetworkInitCommand(ctx, options))
 	cmd.AddCommand(newNetworkResetCommand(ctx, options))
 	cmd.AddCommand(newNetworkConnectCommand(ctx, options))
 	cmd.AddCommand(newNetworkDisconnectCommand(ctx, options))
@@ -93,26 +89,6 @@ func newNetworkCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
 	return cmd
 }
 
-func newNetworkInitCommand(ctx context.Context, options *network.Options) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "init",
-		Short: "Initialize the liqo networking between two clusters",
-		Long:  WithTemplate(liqoctlNetworkInitLongHelp),
-		Args:  cobra.NoArgs,
-
-		Run: func(_ *cobra.Command, _ []string) {
-			err := options.RunInit(ctx)
-			if err != nil {
-				options.LocalFactory.Printer.CheckErr(
-					fmt.Errorf("`network init` failed (error: %w). 
Issue `network reset` to cleanup the environment", err)) - } - output.ExitOnErr(err) - }, - } - - return cmd -} - func newNetworkResetCommand(ctx context.Context, options *network.Options) *cobra.Command { cmd := &cobra.Command{ Use: "reset", @@ -191,7 +167,7 @@ func newNetworkConnectCommand(ctx context.Context, options *network.Options) *co func newNetworkDisconnectCommand(ctx context.Context, options *network.Options) *cobra.Command { cmd := &cobra.Command{ Use: "disconnect", - Short: "Disconnect two clusters", + Short: "Disconnect two clusters keeping the network configuration", Long: WithTemplate(liqoctlNetworkDisconnectLongHelp), Args: cobra.NoArgs, diff --git a/docs/advanced/peering/inter-cluster-network.md b/docs/advanced/peering/inter-cluster-network.md index 2a6ed947e7..3950233c49 100644 --- a/docs/advanced/peering/inter-cluster-network.md +++ b/docs/advanced/peering/inter-cluster-network.md @@ -28,74 +28,62 @@ The unpeer process will automatically remove the Liqo Gateway from the tenant na When you have access to both clusters, you can configure the inter-cluster network connectivity via the `liqoctl network` command. -Note that when you use the `liqoctl network` command, the argument specifying the remote kubeconfig/context corresponds to the cluster that acts as gateway server for the Wireguard tunnel +Note that when you use the `liqoctl network` command, the argument specifying the remote kubeconfig/context corresponds to the cluster that acts as gateway server for the Wireguard tunnel. -The first step to configure networking is initializing the network configuration, allowing the clusters to exchange the network configurations to configure the IP addresses remapping: +To establish a connection between two clusters, you can run the following command: ```bash -liqoctl network init \ +liqoctl network connect \ --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \ --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH \ + --server-service-type NodePort \ --wait ``` -You should see the following output: +You should see an output like the following: ```text -INFO (local) Cluster identity correctly retrieved -INFO (remote) Cluster identity correctly retrieved -INFO (local) Network configuration correctly retrieved -INFO (remote) Network configuration correctly retrieved -INFO (local) Network configuration correctly set up -INFO (remote) Network configuration correctly set up -INFO (local) Configuration applied successfully -INFO (remote) Configuration applied successfully -``` - -This command will share and configure the required resources between the two clusters. -You will find in both your clusters a new Configuration in the tenant namespace. 
+   INFO (local) Network configuration correctly retrieved
+   INFO (remote) Network configuration correctly retrieved
+   INFO (local) Network configuration correctly set up
+   INFO (remote) Network configuration correctly set up
+   INFO (local) Configuration applied successfully
+   INFO (remote) Configuration applied successfully
+   INFO (remote) Gateway server template "wireguard-server/liqo" correctly checked
+   INFO (local) Gateway client template "wireguard-client/liqo" correctly checked
+   INFO (local) Network correctly initialized
+   INFO (remote) Network correctly initialized
+   INFO (remote) Gateway server correctly set up
+   INFO (remote) Gateway pod gw-cl03 is ready
+   INFO (remote) Gateway server Service created successfully
+   INFO (local) Gateway client correctly set up
+   INFO (local) Gateway pod gw-cl04 is ready
+   INFO (remote) Gateway server Secret created successfully
+   INFO (local) Public key correctly created
+   INFO (local) Gateway client Secret created successfully
+   INFO (remote) Public key correctly created
+   INFO (remote) Connection created successfully
+   INFO (local) Connection created successfully
+   INFO (local) Connection is established
+   INFO (remote) Connection is established
+```
+
+If the command was successful, you will see a new Connection resource with status `Connected`:
 
 ```bash
-kubectl get configurations.networking.liqo.io -A
-
-NAMESPACE                      NAME        DESIRED POD CIDR   REMAPPED POD CIDR   AGE
-liqo-tenant-dry-paper-5d16c0   dry-paper   10.243.0.0/16      10.71.0.0/16        4m48s
+kubectl get connections.networking.liqo.io -A
 ```
 
-Now, you can establish the connection between the two clusters:
-
-```bash
-liqoctl network connect \
-    --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \
-    --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH \
-    --server-service-type NodePort \
-    --wait
+```text
+NAMESPACE          NAME   TYPE     STATUS      AGE
+liqo-tenant-cl01   cl01   Server   Connected   51s
 ```
 
-You should see the following output:
+The command above applied the following changes to the clusters:
 
-```text
-INFO (local) Cluster identity correctly retrieved
-INFO (remote) Cluster identity correctly retrieved
-INFO (local) Network correctly initialized
-INFO (remote) Network correctly initialized
-INFO (remote) Gateway server correctly set up
-INFO (remote) Gateway pod gw-crimson-rain is ready
-INFO (remote) Gateway server Service created successfully
-INFO (local) Gateway client correctly set up
-INFO (local) Gateway pod gw-damp-feather is ready
-INFO (remote) Gateway server Secret created successfully
-INFO (local) Public key correctly created
-INFO (local) Gateway client Secret created successfully
-INFO (remote) Public key correctly created
-INFO (remote) Connection created successfully
-INFO (local) Connection created successfully
-INFO (local) Connection is established
-INFO (remote) Connection is established
-```
-
-This command will deploy a Liqo Gateway for each cluster in the tenant namespace and establish the connection between them.
-In the first cluster, the Liqo Gateway will be configured as a client, while in the second cluster, it will be configured as a server.
+- Exchanged the network configurations to set up IP remapping, which allows pods and services in the other cluster to be reached.
+- Deployed a Liqo Gateway for each cluster in the tenant namespace and established the connection between them.
+  By default, the Liqo Gateway in the first cluster is configured as a client, while the one in the second cluster is configured as a server.
 
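+For instance, assuming the tenant namespace from the example output above (`liqo-tenant-cl01`; the actual name depends on your peering), you can check that the Gateway pod created by the command is up and running:
+
+```bash
+# Replace liqo-tenant-cl01 with the tenant namespace of your peering.
+kubectl get pods -n liqo-tenant-cl01
+```
+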
 ```{admonition} Note
 You can see further configuration options with `liqoctl network connect --help`.
 ```
 
diff --git a/pkg/liqoctl/network/cluster.go b/pkg/liqoctl/network/cluster.go
index 186228ef7d..0ed7f57fa1 100644
--- a/pkg/liqoctl/network/cluster.go
+++ b/pkg/liqoctl/network/cluster.go
@@ -350,12 +350,14 @@ func (c *Cluster) CheckNetworkInitialized(ctx context.Context, remoteClusterID l
 		s.Fail(fmt.Sprintf("An error occurred while checking network Configuration: %v", output.PrettyErr(err)))
 		return err
 	case apierrors.IsNotFound(err):
-		s.Fail(fmt.Sprintf("Network Configuration not found. Initialize the network first with `liqoctl network init`: %v", output.PrettyErr(err)))
+		s.Fail(fmt.Sprintf("Network Configuration not found. Retry issuing `liqoctl network connect`. If the issue persists, "+
+			"you can try to reset the network with `liqoctl network reset`: %v", output.PrettyErr(err)))
 		return err
 	}
 
 	if !networkingutils.IsConfigurationStatusSet(conf.Status) {
-		err := fmt.Errorf("network Configuration status is not set yet. Retry later or initialize the network again with `liqoctl network init`")
+		err := fmt.Errorf("network Configuration status is not set yet. Retry later. If the issue persists, " +
+			"you can try to reset the network with `liqoctl network reset`")
 		s.Fail(err)
 		return err
 	}
 
diff --git a/pkg/liqoctl/network/handler.go b/pkg/liqoctl/network/handler.go
index 0377c41db7..322e7219fd 100644
--- a/pkg/liqoctl/network/handler.go
+++ b/pkg/liqoctl/network/handler.go
@@ -71,58 +71,6 @@ func NewOptions(localFactory *factory.Factory) *Options {
 	}
 }
 
-// RunInit initializes the liqo networking between two clusters.
-func (o *Options) RunInit(ctx context.Context) error {
-	ctx, cancel := context.WithTimeout(ctx, o.Timeout)
-	defer cancel()
-
-	// Create and initialize cluster 1.
-	cluster1, err := NewCluster(ctx, o.LocalFactory, o.RemoteFactory, true)
-	if err != nil {
-		return err
-	}
-
-	// Create and initialize cluster 2.
-	cluster2, err := NewCluster(ctx, o.RemoteFactory, o.LocalFactory, true)
-	if err != nil {
-		return err
-	}
-
-	// Forges the local Configuration of cluster 1 to be applied on remote clusters.
-	if err := cluster1.SetLocalConfiguration(ctx); err != nil {
-		return err
-	}
-
-	// Forges the local Configuration of cluster 2 to be applied on remote clusters.
-	if err := cluster2.SetLocalConfiguration(ctx); err != nil {
-		return err
-	}
-
-	// Setup Configurations in cluster 1.
-	if err := cluster1.SetupConfiguration(ctx, cluster2.networkConfiguration); err != nil {
-		return err
-	}
-
-	// Setup Configurations in cluster 2.
-	if err := cluster2.SetupConfiguration(ctx, cluster1.networkConfiguration); err != nil {
-		return err
-	}
-
-	if o.Wait {
-		// Wait for cluster 1 to be ready.
-		if err := cluster1.waiter.ForConfiguration(ctx, cluster2.localClusterID); err != nil {
-			return err
-		}
-
-		// Wait for cluster 2 to be ready.
-		if err := cluster2.waiter.ForConfiguration(ctx, cluster1.localClusterID); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 // RunReset reset the liqo networking between two clusters.
 // If the clusters are still connected through the gateways, it deletes them before removing network Configurations.
func (o *Options) RunReset(ctx context.Context) error { @@ -179,7 +127,12 @@ func (o *Options) RunConnect(ctx context.Context) error { if err != nil { return err } + // Exchange network configurations between the clusters + if err := o.initNetworkConfigs(ctx, cluster1, cluster2); err != nil { + return err + } + // Connect the two clusters if !o.SkipValidation { // Check if the Templates exists and is valid on cluster 2 if err := cluster2.CheckTemplateGwServer(ctx, o); err != nil { @@ -357,6 +310,40 @@ func (o *Options) RunDisconnect(ctx context.Context, cluster1, cluster2 *Cluster return cluster2.DeleteGatewayServer(ctx, cluster1.localClusterID) } +func (o *Options) initNetworkConfigs(ctx context.Context, cluster1 *Cluster, cluster2 *Cluster) error { + // Forges the local Configuration of cluster 1 to be applied on remote clusters. + if err := cluster1.SetLocalConfiguration(ctx); err != nil { + return err + } + + // Forges the local Configuration of cluster 2 to be applied on remote clusters. + if err := cluster2.SetLocalConfiguration(ctx); err != nil { + return err + } + + // Setup Configurations in cluster 1. + if err := cluster1.SetupConfiguration(ctx, cluster2.networkConfiguration); err != nil { + return err + } + + // Setup Configurations in cluster 2. + if err := cluster2.SetupConfiguration(ctx, cluster1.networkConfiguration); err != nil { + return err + } + + // Wait for cluster 1 to be ready. + if err := cluster1.waiter.ForConfiguration(ctx, cluster2.localClusterID); err != nil { + return err + } + + // Wait for cluster 2 to be ready. + if err := cluster2.waiter.ForConfiguration(ctx, cluster1.localClusterID); err != nil { + return err + } + + return nil +} + func (o *Options) newGatewayServerForgeOptions(kubeClient kubernetes.Interface, remoteClusterID liqov1beta1.ClusterID) *forge.GwServerOptions { return &forge.GwServerOptions{ KubeClient: kubeClient, diff --git a/pkg/liqoctl/peer/handler.go b/pkg/liqoctl/peer/handler.go index 00a13afc36..1bc279c991 100644 --- a/pkg/liqoctl/peer/handler.go +++ b/pkg/liqoctl/peer/handler.go @@ -135,10 +135,6 @@ func ensureNetworking(ctx context.Context, o *Options) error { DisableSharingKeys: false, } - if err := networkOptions.RunInit(ctx); err != nil { - return err - } - if err := networkOptions.RunConnect(ctx); err != nil { return err }
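
For reference, the sketch below summarizes how the user-facing workflow changes with this patch; the kubeconfig paths are the placeholders used in the documentation example, and the extra flags shown there (such as `--server-service-type`) still apply to `connect`:

```bash
# Before this patch: two separate steps were required.
liqoctl network init    --kubeconfig "$CLUSTER_1_KUBECONFIG_PATH" --remote-kubeconfig "$CLUSTER_2_KUBECONFIG_PATH" --wait
liqoctl network connect --kubeconfig "$CLUSTER_1_KUBECONFIG_PATH" --remote-kubeconfig "$CLUSTER_2_KUBECONFIG_PATH" --wait

# After this patch: a single command exchanges the network configurations and sets up the Gateways.
liqoctl network connect --kubeconfig "$CLUSTER_1_KUBECONFIG_PATH" --remote-kubeconfig "$CLUSTER_2_KUBECONFIG_PATH" --wait
```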