diff --git a/cmd/liqoctl/cmd/network.go b/cmd/liqoctl/cmd/network.go
index 4d8f6ff59e..0c8d6e2ad4 100644
--- a/cmd/liqoctl/cmd/network.go
+++ b/cmd/liqoctl/cmd/network.go
@@ -31,10 +31,6 @@ import (
 
 const liqoctlNetworkLongHelp = `Manage liqo networking.`
 
-const liqoctlNetworkInitLongHelp = `Initialize the liqo networking between two clusters.
-
-It generates all network configurations required to connect the two clusters.`
-
 const liqoctlNetworkResetLongHelp = `Tear down all liqo networking between two clusters.
 
 It disconnects the two clusters and remove network configurations generated with the *network init* command.`
@@ -44,9 +40,10 @@ const liqoctlNetworConnectLongHelp = `Connect two clusters using liqo networking
 This command creates the Gateways to connect the two clusters.
 Run this command after inizialiting the network using the *network init* command.`
 
-const liqoctlNetworkDisconnectLongHelp = `Disconnect two clusters.
+const liqoctlNetworkDisconnectLongHelp = `Disconnect two clusters, keeping the network configuration.
 
-It deletes the Gateways, but keeps the network configurations generated with the *network init* command.`
+It deletes the Gateways, but keeps the network configurations generated with the *network connect* command.
+Useful when a user wants to disconnect the clusters while keeping the same IP mapping.`
 
 func newNetworkCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
 	options := network.NewOptions(f)
@@ -85,7 +82,6 @@ func newNetworkCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
 	options.LocalFactory.Printer.CheckErr(cmd.RegisterFlagCompletionFunc("remote-liqo-namespace",
 		completion.Namespaces(ctx, options.RemoteFactory, completion.NoLimit)))
 
-	cmd.AddCommand(newNetworkInitCommand(ctx, options))
 	cmd.AddCommand(newNetworkResetCommand(ctx, options))
 	cmd.AddCommand(newNetworkConnectCommand(ctx, options))
 	cmd.AddCommand(newNetworkDisconnectCommand(ctx, options))
@@ -93,26 +89,6 @@ func newNetworkCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
 	return cmd
 }
 
-func newNetworkInitCommand(ctx context.Context, options *network.Options) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "init",
-		Short: "Initialize the liqo networking between two clusters",
-		Long:  WithTemplate(liqoctlNetworkInitLongHelp),
-		Args:  cobra.NoArgs,
-
-		Run: func(_ *cobra.Command, _ []string) {
-			err := options.RunInit(ctx)
-			if err != nil {
-				options.LocalFactory.Printer.CheckErr(
-					fmt.Errorf("`network init` failed (error: %w). Issue `network reset` to cleanup the environment", err))
-			}
-			output.ExitOnErr(err)
-		},
-	}
-
-	return cmd
-}
-
 func newNetworkResetCommand(ctx context.Context, options *network.Options) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "reset",
@@ -191,7 +167,7 @@ func newNetworkConnectCommand(ctx context.Context, options *network.Options) *co
 func newNetworkDisconnectCommand(ctx context.Context, options *network.Options) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "disconnect",
-		Short: "Disconnect two clusters",
+		Short: "Disconnect two clusters, keeping the network configuration",
 		Long:  WithTemplate(liqoctlNetworkDisconnectLongHelp),
 		Args:  cobra.NoArgs,
 
diff --git a/docs/advanced/nat.md b/docs/advanced/nat.md
index f651eaeea8..590099fc18 100644
--- a/docs/advanced/nat.md
+++ b/docs/advanced/nat.md
@@ -61,7 +61,7 @@ liqoctl peer \
 ```
 
 The command above sets up a complete peering between cluster 1 and cluster 2.
-**To configure only the network**, you can pass the same parameters to the `liqoctl network connect` command, once network have been initialized with `liqoctl init`:
+**To configure only the network**, you can pass the same parameters to the `liqoctl network connect` command:
 
 ```bash
 liqoctl network connect \
diff --git a/docs/advanced/peering/inter-cluster-network.md b/docs/advanced/peering/inter-cluster-network.md
index 2a6ed947e7..cffb1b77b8 100644
--- a/docs/advanced/peering/inter-cluster-network.md
+++ b/docs/advanced/peering/inter-cluster-network.md
@@ -28,74 +28,62 @@ The unpeer process will automatically remove the Liqo Gateway from the tenant na
 
 When you have access to both clusters, you can configure the inter-cluster network connectivity via the `liqoctl network` command.
 
-Note that when you use the `liqoctl network` command, the argument specifying the remote kubeconfig/context corresponds to the cluster that acts as gateway server for the Wireguard tunnel
+Note that when you use the `liqoctl network` command, the argument specifying the remote kubeconfig/context corresponds to the cluster that acts as gateway server for the Wireguard tunnel.
 
-The first step to configure networking is initializing the network configuration, allowing the clusters to exchange the network configurations to configure the IP addresses remapping:
+To establish a connection between two clusters, you can run the following command:
 
 ```bash
-liqoctl network init \
+liqoctl network connect \
     --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \
     --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH \
+    --server-service-type NodePort \
     --wait
 ```
 
-You should see the following output:
+You should see an output like the following:
 
 ```text
-INFO (local) Cluster identity correctly retrieved
-INFO (remote) Cluster identity correctly retrieved
-INFO (local) Network configuration correctly retrieved
-INFO (remote) Network configuration correctly retrieved
-INFO (local) Network configuration correctly set up
-INFO (remote) Network configuration correctly set up
-INFO (local) Configuration applied successfully
-INFO (remote) Configuration applied successfully
-```
-
-This command will share and configure the required resources between the two clusters.
-You will find in both your clusters a new Configuration in the tenant namespace.
+   INFO (local) Network configuration correctly retrieved
+   INFO (remote) Network configuration correctly retrieved
+   INFO (local) Network configuration correctly set up
+   INFO (remote) Network configuration correctly set up
+   INFO (local) Configuration applied successfully
+   INFO (remote) Configuration applied successfully
+   INFO (remote) Gateway server template "wireguard-server/liqo" correctly checked
+   INFO (local) Gateway client template "wireguard-client/liqo" correctly checked
+   INFO (local) Network correctly initialized
+   INFO (remote) Network correctly initialized
+   INFO (remote) Gateway server correctly set up
+   INFO (remote) Gateway pod gw-cl01 is ready
+   INFO (remote) Gateway server Service created successfully
+   INFO (local) Gateway client correctly set up
+   INFO (local) Gateway pod gw-cl02 is ready
+   INFO (remote) Gateway server Secret created successfully
+   INFO (local) Public key correctly created
+   INFO (local) Gateway client Secret created successfully
+   INFO (remote) Public key correctly created
+   INFO (remote) Connection created successfully
+   INFO (local) Connection created successfully
+   INFO (local) Connection is established
+   INFO (remote) Connection is established
+```
+
+If the command was successful, you will be able to see a new `Connection` resource with status `Connected`:
 
 ```bash
-kubectl get configurations.networking.liqo.io -A
-
-NAMESPACE                      NAME        DESIRED POD CIDR   REMAPPED POD CIDR   AGE
-liqo-tenant-dry-paper-5d16c0   dry-paper   10.243.0.0/16      10.71.0.0/16        4m48s
+kubectl get connections.networking.liqo.io -A
 ```
 
-Now, you can establish the connection between the two clusters:
-
-```bash
-liqoctl network connect \
-    --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \
-    --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH \
-    --server-service-type NodePort \
-    --wait
+```text
+NAMESPACE          NAME   TYPE     STATUS      AGE
+liqo-tenant-cl01   cl01   Server   Connected   51s
 ```
 
-You should see the following output:
+The command above applied the following changes to the clusters:
 
-```text
-INFO (local) Cluster identity correctly retrieved
-INFO (remote) Cluster identity correctly retrieved
-INFO (local) Network correctly initialized
-INFO (remote) Network correctly initialized
-INFO (remote) Gateway server correctly set up
-INFO (remote) Gateway pod gw-crimson-rain is ready
-INFO (remote) Gateway server Service created successfully
-INFO (local) Gateway client correctly set up
-INFO (local) Gateway pod gw-damp-feather is ready
-INFO (remote) Gateway server Secret created successfully
-INFO (local) Public key correctly created
-INFO (local) Gateway client Secret created successfully
-INFO (remote) Public key correctly created
-INFO (remote) Connection created successfully
-INFO (local) Connection created successfully
-INFO (local) Connection is established
-INFO (remote) Connection is established
-```
-
-This command will deploy a Liqo Gateway for each cluster in the tenant namespace and establish the connection between them.
-In the first cluster, the Liqo Gateway will be configured as a client, while in the second cluster, it will be configured as a server.
+* Exchanged the network configurations to set up the IP remapping, which makes pods and services in the other cluster reachable (see the example below)
+* Deployed a Liqo Gateway for each cluster in the tenant namespace and established the connection between them.
+  By default, the Liqo Gateway in the first cluster is configured as a client, while the one in the second cluster is configured as a server.
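+
+As an illustration of the remapping (a hypothetical example: the CIDRs used here are the sample values reported by the `Configuration` resources shown later on this page, and will differ in your clusters), suppose cluster 1 remaps the pod CIDR of cluster 2 from `10.243.0.0/16` to `10.71.0.0/16`:
+
+```text
+pod CIDR of cluster 2:         10.243.0.0/16
+remapped by cluster 1 as:      10.71.0.0/16
+pod 10.243.1.5 (cluster 2) ->  reached from cluster 1 as 10.71.1.5
+```
+
+In this example only the network prefix changes, so the host part of the address is preserved.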
 ```{admonition} Note
 You can see further configuration options with `liqoctl network connect --help`.
@@ -104,47 +92,75 @@ For instance, in the previous command we have used the `--server-service-type No
 Alternatively, you can use the `--server-service-type LoadBalancer` option to expose the Liqo Gateway service as a LoadBalancer service (if supported by your cloud provider).
 ```
 
-In cluster 1 you will find the following resources:
+In **cluster 1**, which in this case **hosts the client gateway**, you will find the following resources:
 
-```bash
-kubectl get gatewayclients.networking.liqo.io -A
-```
+* A `Configuration` resource describing how the pod CIDR of the other cluster is remapped in the current cluster:
 
-```text
-NAMESPACE          NAME   TEMPLATE NAME      IP           PORT    AGE
-liqo-tenant-cl02   cl02   wireguard-client   172.19.0.8   32009   28s
-```
+  ```bash
+  kubectl get configurations.networking.liqo.io -A
+  ```
 
-```bash
-kubectl get connections.networking.liqo.io -A
-```
+  ```text
+  NAMESPACE          NAME   DESIRED POD CIDR   REMAPPED POD CIDR   AGE
+  liqo-tenant-cl02   cl02   10.243.0.0/16      10.71.0.0/16        4m48s
+  ```
 
-```text
-NAMESPACE          NAME      TYPE     STATUS      AGE
-liqo-tenant-cl02   gw-cl02   Client   Connected   76s
-```
+* A `GatewayClient` resource, which describes the configuration of the gateway acting as a **client** for establishing the tunnel between the two clusters:
 
-In cluster 2 you will find the following resources:
+  ```bash
+  kubectl get gatewayclients.networking.liqo.io -A
+  ```
 
-```bash
-kubectl get gatewayservers.networking.liqo.io -A
-```
+  ```text
+  NAMESPACE          NAME   TEMPLATE NAME      IP           PORT    AGE
+  liqo-tenant-cl02   cl02   wireguard-client   172.19.0.8   32009   28s
+  ```
 
-```text
-NAMESPACE          NAME   TEMPLATE NAME      IP           PORT    AGE
-liqo-tenant-cl01   cl01   wireguard-server   172.19.0.8   32009   69s
-```
+* A `Connection` resource, describing the status of the tunnel with the peer cluster:
 
-```bash
-kubectl get connections.networking.liqo.io -A
-```
+  ```bash
+  kubectl get connections.networking.liqo.io -A
+  ```
 
-```text
-NAMESPACE          NAME   TYPE     STATUS      AGE
-liqo-tenant-cl01   cl01   Server   Connected   51s
-```
+  ```text
+  NAMESPACE          NAME      TYPE     STATUS      AGE
+  liqo-tenant-cl02   gw-cl02   Client   Connected   76s
+  ```
+
+In **cluster 2**, which in this case **hosts the server gateway**, you will find the following resources:
+
+* A `Configuration` resource describing how the pod CIDR of the other cluster is remapped in the current cluster:
+
+  ```bash
+  kubectl get configurations.networking.liqo.io -A
+  ```
+
+  ```text
+  NAMESPACE          NAME   DESIRED POD CIDR   REMAPPED POD CIDR   AGE
+  liqo-tenant-cl01   cl01   10.243.0.0/16      10.71.0.0/16        4m48s
+  ```
+
+* A `GatewayServer` resource, which describes the configuration of the gateway acting as a **server** for establishing the tunnel between the two clusters:
+
+  ```bash
+  kubectl get gatewayservers.networking.liqo.io -A
+  ```
+
+  ```text
+  NAMESPACE          NAME   TEMPLATE NAME      IP           PORT    AGE
+  liqo-tenant-cl01   cl01   wireguard-server   172.19.0.8   32009   69s
+  ```
+
+* A `Connection` resource, describing the status of the tunnel with the peer cluster:
+
+  ```bash
+  kubectl get connections.networking.liqo.io -A
+  ```
 
-You can check the status of the connection to see if it is working correctly.
+  ```text
+  NAMESPACE          NAME   TYPE     STATUS      AGE
+  liqo-tenant-cl01   cl01   Server   Connected   51s
+  ```
 
 ### Tear down
 
diff --git a/pkg/liqoctl/network/cluster.go b/pkg/liqoctl/network/cluster.go
index 186228ef7d..a92b693ad5 100644
--- a/pkg/liqoctl/network/cluster.go
+++ b/pkg/liqoctl/network/cluster.go
@@ -350,12 +350,14 @@ func (c *Cluster) CheckNetworkInitialized(ctx context.Context, remoteClusterID l
 		s.Fail(fmt.Sprintf("An error occurred while checking network Configuration: %v", output.PrettyErr(err)))
 		return err
 	case apierrors.IsNotFound(err):
-		s.Fail(fmt.Sprintf("Network Configuration not found. Initialize the network first with `liqoctl network init`: %v", output.PrettyErr(err)))
+		s.Fail(fmt.Sprintf("Network Configuration not found. Retry issuing `liqoctl network connect`. If the issue persists, "+
+			"you can try to reset the network with `liqoctl network reset`: %v", output.PrettyErr(err)))
 		return err
 	}
 
 	if !networkingutils.IsConfigurationStatusSet(conf.Status) {
-		err := fmt.Errorf("network Configuration status is not set yet. Retry later or initialize the network again with `liqoctl network init`")
+		err := fmt.Errorf("network Configuration status is not set yet. Retry later. If the issue persists, " +
+			"you can try to reset the network with `liqoctl network reset`")
 		s.Fail(err)
 		return err
 	}
diff --git a/pkg/liqoctl/network/handler.go b/pkg/liqoctl/network/handler.go
index 0377c41db7..ee208566b0 100644
--- a/pkg/liqoctl/network/handler.go
+++ b/pkg/liqoctl/network/handler.go
@@ -71,58 +71,6 @@ func NewOptions(localFactory *factory.Factory) *Options {
 	}
 }
 
-// RunInit initializes the liqo networking between two clusters.
-func (o *Options) RunInit(ctx context.Context) error {
-	ctx, cancel := context.WithTimeout(ctx, o.Timeout)
-	defer cancel()
-
-	// Create and initialize cluster 1.
-	cluster1, err := NewCluster(ctx, o.LocalFactory, o.RemoteFactory, true)
-	if err != nil {
-		return err
-	}
-
-	// Create and initialize cluster 2.
-	cluster2, err := NewCluster(ctx, o.RemoteFactory, o.LocalFactory, true)
-	if err != nil {
-		return err
-	}
-
-	// Forges the local Configuration of cluster 1 to be applied on remote clusters.
-	if err := cluster1.SetLocalConfiguration(ctx); err != nil {
-		return err
-	}
-
-	// Forges the local Configuration of cluster 2 to be applied on remote clusters.
-	if err := cluster2.SetLocalConfiguration(ctx); err != nil {
-		return err
-	}
-
-	// Setup Configurations in cluster 1.
-	if err := cluster1.SetupConfiguration(ctx, cluster2.networkConfiguration); err != nil {
-		return err
-	}
-
-	// Setup Configurations in cluster 2.
-	if err := cluster2.SetupConfiguration(ctx, cluster1.networkConfiguration); err != nil {
-		return err
-	}
-
-	if o.Wait {
-		// Wait for cluster 1 to be ready.
-		if err := cluster1.waiter.ForConfiguration(ctx, cluster2.localClusterID); err != nil {
-			return err
-		}
-
-		// Wait for cluster 2 to be ready.
-		if err := cluster2.waiter.ForConfiguration(ctx, cluster1.localClusterID); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 // RunReset reset the liqo networking between two clusters.
 // If the clusters are still connected through the gateways, it deletes them before removing network Configurations.
 func (o *Options) RunReset(ctx context.Context) error {
@@ -179,7 +127,12 @@ func (o *Options) RunConnect(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
+	// Exchange network configurations between the clusters
+	if err := o.initNetworkConfigs(ctx, cluster1, cluster2); err != nil {
+		return err
+	}
 
+	// Connect the two clusters
 	if !o.SkipValidation {
 		// Check if the Templates exists and is valid on cluster 2
 		if err := cluster2.CheckTemplateGwServer(ctx, o); err != nil {
 			return err
 		}
@@ -357,6 +310,40 @@ func (o *Options) RunDisconnect(ctx context.Context, cluster1, cluster2 *Cluster
 	return cluster2.DeleteGatewayServer(ctx, cluster1.localClusterID)
 }
 
+func (o *Options) initNetworkConfigs(ctx context.Context, cluster1, cluster2 *Cluster) error {
+	// Forges the local Configuration of cluster 1 to be applied on remote clusters.
+	if err := cluster1.SetLocalConfiguration(ctx); err != nil {
+		return err
+	}
+
+	// Forges the local Configuration of cluster 2 to be applied on remote clusters.
+	if err := cluster2.SetLocalConfiguration(ctx); err != nil {
+		return err
+	}
+
+	// Setup Configurations in cluster 1.
+	if err := cluster1.SetupConfiguration(ctx, cluster2.networkConfiguration); err != nil {
+		return err
+	}
+
+	// Setup Configurations in cluster 2.
+	if err := cluster2.SetupConfiguration(ctx, cluster1.networkConfiguration); err != nil {
+		return err
+	}
+
+	// Wait for cluster 1 to be ready.
+	if err := cluster1.waiter.ForConfiguration(ctx, cluster2.localClusterID); err != nil {
+		return err
+	}
+
+	// Wait for cluster 2 to be ready.
+	if err := cluster2.waiter.ForConfiguration(ctx, cluster1.localClusterID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (o *Options) newGatewayServerForgeOptions(kubeClient kubernetes.Interface, remoteClusterID liqov1beta1.ClusterID) *forge.GwServerOptions {
 	return &forge.GwServerOptions{
 		KubeClient: kubeClient,
diff --git a/pkg/liqoctl/peer/handler.go b/pkg/liqoctl/peer/handler.go
index 00a13afc36..b7c2594398 100644
--- a/pkg/liqoctl/peer/handler.go
+++ b/pkg/liqoctl/peer/handler.go
@@ -78,7 +78,7 @@ func (o *Options) RunPeer(ctx context.Context) error {
 
 	// To ease the experience for most users, we disable the namespace and remote-namespace flags
 	// so that resources are created according to the default Liqo logic.
-	// Advanced users can use the individual commands (e.g., liqoctl init, liqoctl connect, etc..) to
+	// Advanced users can use the individual commands (e.g., liqoctl network connect, liqoctl network disconnect, etc.) to
 	// customize the namespaces according to their needs (e.g., networking resources in a specific namespace).
 	o.LocalFactory.Namespace = ""
 	o.RemoteFactory.Namespace = ""
@@ -135,10 +135,6 @@ func ensureNetworking(ctx context.Context, o *Options) error {
 		DisableSharingKeys: false,
 	}
 
-	if err := networkOptions.RunInit(ctx); err != nil {
-		return err
-	}
-
 	if err := networkOptions.RunConnect(ctx); err != nil {
 		return err
 	}
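
For reference, a minimal sketch of the resulting `liqoctl network` workflow, with `network connect` now exchanging the network configurations itself (the kubeconfig paths are placeholders and the flags mirror the documentation examples above; adapt them to your setup):

```bash
# Exchange the network configurations and set up the gateways in a single step
# (the cluster passed via --remote-kubeconfig hosts the gateway server).
liqoctl network connect \
    --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \
    --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH \
    --server-service-type NodePort \
    --wait

# Delete the gateways but keep the exchanged configurations, preserving the IP mapping.
liqoctl network disconnect \
    --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \
    --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH

# Tear down both the gateways and the network configurations.
liqoctl network reset \
    --kubeconfig $CLUSTER_1_KUBECONFIG_PATH \
    --remote-kubeconfig $CLUSTER_2_KUBECONFIG_PATH
```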