This repository has been archived by the owner on Oct 5, 2018. It is now read-only.

Propagate labels from CR to deployments, services and configmaps #18

Merged · 4 commits · Jul 7, 2018
2 changes: 1 addition & 1 deletion .travis.yml
@@ -23,7 +23,7 @@ jobs:
- 'sed -i -e ''s/imagePullPolicy: Always/imagePullPolicy: Never/g'' deploy/operator.yaml'
- kubectl create -f deploy/rbac.yaml
- kubectl create -f deploy/crd.yaml
- kubectl create -f deploy/operator.yaml
- ./tests/wait-for-operator.sh
- CLUSTER_NAME=elastic1 ./tests/test-insecure-cdm.sh
- CLUSTER_NAME=elastic1 ./tests/test-secure-cdm.sh
- kubectl delete -f deploy/operator.yaml
2 changes: 2 additions & 0 deletions .travis/prepare-environment.sh
@@ -4,6 +4,8 @@ set -x
set -o errexit
set -o nounset

sudo sysctl -w vm.max_map_count=262144

go get -u github.com/golang/dep/cmd/dep
dep ensure
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.9.0/bin/linux/amd64/kubectl
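The sysctl added here satisfies Elasticsearch's mmap bootstrap check, which refuses to start a node when vm.max_map_count is below 262144. A quick host-side check of that setting, as a small Go sketch (not part of this PR):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Read the live kernel setting that the hunk above raises.
	raw, err := os.ReadFile("/proc/sys/vm/max_map_count")
	if err != nil {
		fmt.Println("cannot read sysctl:", err)
		os.Exit(1)
	}
	v, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		fmt.Println("unexpected sysctl contents:", err)
		os.Exit(1)
	}
	if v < 262144 {
		fmt.Printf("vm.max_map_count=%d is below 262144; Elasticsearch will fail its bootstrap check\n", v)
		os.Exit(1)
	}
	fmt.Println("vm.max_map_count OK:", v)
}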
17 changes: 10 additions & 7 deletions pkg/k8shandler/configmaps.go
@@ -25,15 +25,16 @@ func CreateOrUpdateConfigMaps(dpl *v1alpha1.Elasticsearch) (string, error) {

// TODO: take all vars from CRD
pathData := "- /elasticsearch/persistent/"
err := createOrUpdateConfigMap(configMapName, dpl.Namespace, dpl.Name, defaultKibanaIndexMode, pathData, false, dpl.Spec.Secure.Disabled, owner)
err := createOrUpdateConfigMap(configMapName, dpl.Namespace, dpl.Name, defaultKibanaIndexMode, pathData, false, dpl.Spec.Secure.Disabled, owner, dpl.Labels)
if err != nil {
return configMapName, fmt.Errorf("Failure creating ConfigMap %v", err)
}
return configMapName, nil
}

func createOrUpdateConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, pathData string, allowClusterReader bool, insecureCluster bool, owner metav1.OwnerReference) error {
elasticsearchCM, err := createConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, pathData, allowClusterReader, insecureCluster)
func createOrUpdateConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, pathData string,
allowClusterReader bool, insecureCluster bool, owner metav1.OwnerReference, labels map[string]string) error {
elasticsearchCM, err := createConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, pathData, allowClusterReader, insecureCluster, labels)
if err != nil {
return err
}
@@ -43,7 +44,7 @@ func createOrUpdateConfigMap(configMapName, namespace, clusterName, kibanaIndexM
return fmt.Errorf("Failure constructing Elasticsearch ConfigMap: %v", err)
} else if errors.IsAlreadyExists(err) {
// Get existing configMap to check if it is same as what we want
existingCM := configMap(configMapName, namespace)
existingCM := configMap(configMapName, namespace, labels)
err = sdk.Get(existingCM)
if err != nil {
return fmt.Errorf("Unable to get Elasticsearch cluster configMap: %v", err)
@@ -54,8 +55,9 @@ func createOrUpdateConfigMap(configMapName, namespace, clusterName, kibanaIndexM
return nil
}

func createConfigMap(configMapName string, namespace string, clusterName string, kibanaIndexMode string, pathData string, allowClusterReader bool, insecureCluster bool) (*v1.ConfigMap, error) {
cm := configMap(configMapName, namespace)
func createConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, pathData string,
allowClusterReader bool, insecureCluster bool, labels map[string]string) (*v1.ConfigMap, error) {
cm := configMap(configMapName, namespace, labels)
cm.Data = map[string]string{}
buf := &bytes.Buffer{}
if err := renderEsYml(buf, allowClusterReader, kibanaIndexMode, pathData, insecureCluster); err != nil {
@@ -73,7 +75,7 @@ func createConfigMap(configMapName string, namespace string, clusterName string,
}

// configMap returns a v1.ConfigMap object
func configMap(configMapName string, namespace string) *v1.ConfigMap {
func configMap(configMapName string, namespace string, labels map[string]string) *v1.ConfigMap {
return &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
@@ -82,6 +84,7 @@ func configMap(configMapName string, namespace string) *v1.ConfigMap {
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Namespace: namespace,
Labels: labels,
},
}
}
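Taken together, this file now threads the CR's labels from CreateOrUpdateConfigMaps all the way down to the ConfigMap's metadata. A minimal, self-contained sketch of the effect (imports and values assumed; not part of this PR):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// configMap mirrors the helper above: the caller's label map is stamped
// directly onto the ConfigMap's metadata.
func configMap(name, namespace string, labels map[string]string) *v1.ConfigMap {
	return &v1.ConfigMap{
		TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
	}
}

func main() {
	crLabels := map[string]string{"app": "logging"} // stands in for dpl.Labels
	cm := configMap("elastic1", "default", crLabels)
	fmt.Println(cm.Labels) // map[app:logging] — the CR's labels, propagated
}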
14 changes: 13 additions & 1 deletion pkg/k8shandler/deployment.go
@@ -39,6 +39,15 @@ func (node *deploymentNode) isDifferent(cfg *desiredNodeState) (bool, error) {
}
}

// Check if labels are correct
for label, value := range cfg.Labels {
val, ok := node.resource.Labels[label]
if !ok || val != value {
logrus.Infof("Labels on deployment '%v' need updating..", node.resource.GetName())
return true, nil
}
}

// TODO: Check if the Variables are the desired ones

// Check that storage configuration is the same
@@ -91,7 +100,10 @@ func (node *deploymentNode) constructNodeResource(cfg *desiredNodeState, owner m
deployment.Spec = apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: cfg.getLabels(),
MatchLabels: cfg.getLabelSelector(),
},
Strategy: apps.DeploymentStrategy{
Type: "Recreate",
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
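Two notes on this file. The label check treats the desired labels as a subset: any key that is missing or carries the wrong value on the live Deployment triggers a rebuild, while labels that exist only on the Deployment are ignored. The same hunk also switches the Deployment to the Recreate strategy, so the old pod is torn down before its replacement starts — presumably so two Elasticsearch pods never contend for the same storage. The comparison, as a standalone sketch (names assumed; not part of this PR):

package main

import "fmt"

// labelsNeedUpdate reports whether any desired label is absent from, or
// differs on, the live object; labels present only on the live object are
// deliberately ignored, matching the check added to isDifferent above.
func labelsNeedUpdate(desired, live map[string]string) bool {
	for k, want := range desired {
		if got, ok := live[k]; !ok || got != want {
			return true
		}
	}
	return false
}

func main() {
	live := map[string]string{"cluster": "elastic1", "extra": "kept"}
	desired := map[string]string{"cluster": "elastic1", "testlabel": "addedvalue"}
	fmt.Println(labelsNeedUpdate(desired, live)) // true — "testlabel" is missing
}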
16 changes: 14 additions & 2 deletions pkg/k8shandler/desirednodestate.go
@@ -39,6 +39,7 @@ type desiredNodeState struct {
ReplicaNum int32
ServiceAccountName string
ConfigMapName string
Labels map[string]string
}

type actualNodeState struct {
@@ -59,6 +60,7 @@ func constructNodeSpec(dpl *v1alpha1.Elasticsearch, esNode v1alpha1.Elasticsearc
ReplicaNum: replicaNum,
ServiceAccountName: serviceAccountName,
ConfigMapName: configMapName,
Labels: dpl.Labels,
}
deployName, err := constructDeployName(dpl.Name, esNode.Roles, nodeNum, replicaNum)
if err != nil {
@@ -139,9 +141,19 @@ func (cfg *desiredNodeState) isNodeClient() bool {
}

func (cfg *desiredNodeState) getLabels() map[string]string {
labels := cfg.Labels
if labels == nil {
labels = make(map[string]string)
}
labels["es-node-client"] = strconv.FormatBool(cfg.isNodeClient())
labels["es-node-data"] = strconv.FormatBool(cfg.isNodeData())
labels["es-node-master"] = strconv.FormatBool(cfg.isNodeMaster())
labels["cluster"] = cfg.ClusterName
return labels
}

func (cfg *desiredNodeState) getLabelSelector() map[string]string {
return map[string]string{
"component": fmt.Sprintf("elasticsearch-%s", cfg.ClusterName),
//"es-node-role": cfg.NodeType,
"es-node-client": strconv.FormatBool(cfg.isNodeClient()),
"es-node-data": strconv.FormatBool(cfg.isNodeData()),
"es-node-master": strconv.FormatBool(cfg.isNodeMaster()),
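One subtlety in getLabels: when the CR carries labels, the es-node-* and cluster keys are written into cfg.Labels itself, which aliases the CR's own label map. A defensive-copy variant (an observation and sketch, not part of this PR; strconv is already imported in this file):

func (cfg *desiredNodeState) getLabels() map[string]string {
	labels := make(map[string]string, len(cfg.Labels)+4)
	for k, v := range cfg.Labels { // copy, so the CR's own map is never mutated
		labels[k] = v
	}
	labels["es-node-client"] = strconv.FormatBool(cfg.isNodeClient())
	labels["es-node-data"] = strconv.FormatBool(cfg.isNodeData())
	labels["es-node-master"] = strconv.FormatBool(cfg.isNodeMaster())
	labels["cluster"] = cfg.ClusterName
	return labels
}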
11 changes: 6 additions & 5 deletions pkg/k8shandler/services.go
@@ -18,20 +18,20 @@ func CreateOrUpdateServices(dpl *v1alpha1.Elasticsearch) error {
elasticsearchRestSvcName := dpl.Name
owner := asOwner(dpl)

err := createOrUpdateService(elasticsearchClusterSvcName, dpl.Namespace, dpl.Name, 9300, selectorForES("es-node-master", dpl.Name), owner)
err := createOrUpdateService(elasticsearchClusterSvcName, dpl.Namespace, dpl.Name, 9300, selectorForES("es-node-master", dpl.Name), dpl.Labels, owner)
if err != nil {
return fmt.Errorf("Failure creating service %v", err)
}

err = createOrUpdateService(elasticsearchRestSvcName, dpl.Namespace, dpl.Name, 9200, selectorForES("es-node-client", dpl.Name), owner)
err = createOrUpdateService(elasticsearchRestSvcName, dpl.Namespace, dpl.Name, 9200, selectorForES("es-node-client", dpl.Name), dpl.Labels, owner)
if err != nil {
return fmt.Errorf("Failure creating service %v", err)
}
return nil
}

func createOrUpdateService(serviceName string, namespace string, clusterName string, port int32, selector map[string]string, owner metav1.OwnerReference) error {
elasticsearchSvc := createService(serviceName, namespace, clusterName, port, selector)
func createOrUpdateService(serviceName, namespace, clusterName string, port int32, selector, labels map[string]string, owner metav1.OwnerReference) error {
elasticsearchSvc := createService(serviceName, namespace, clusterName, port, selector, labels)
addOwnerRefToObject(elasticsearchSvc, owner)
err := sdk.Create(elasticsearchSvc)
if err != nil && !errors.IsAlreadyExists(err) {
@@ -49,8 +49,9 @@ func createOrUpdateService(serviceName string, namespace string, clusterName str
return nil
}

func createService(serviceName string, namespace string, clusterName string, port int32, selector map[string]string) *v1.Service {
func createService(serviceName, namespace, clusterName string, port int32, selector, labels map[string]string) *v1.Service {
svc := service(serviceName, namespace)
svc.Labels = labels
svc.Spec = v1.ServiceSpec{
Selector: selector,
Ports: []v1.ServicePort{
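Note the distinction the new signature makes: selector decides which pods the Service routes to, while labels stamps the CR's labels onto the Service object itself. selectorForES is not shown in this diff; given the role labels getLabels sets, a plausible shape (an assumption, not the repo's actual code) is:

// Hypothetical sketch of selectorForES: match pods that carry the given
// role flag within the named cluster.
func selectorForES(roleLabel, clusterName string) map[string]string {
	return map[string]string{
		roleLabel: "true",
		"cluster": clusterName,
	}
}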
21 changes: 21 additions & 0 deletions tests/test-insecure-cdm.sh
@@ -61,4 +61,25 @@ fi
echo "ServiceAccount: OK"


echo "*** Test changing deployment by changing a label"
# Save name of the current pod, to know what to delete later
old_pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')

# Add label and see that deployment is respawned
kubectl patch elasticsearch/elastic1 --type=merge --patch '{"metadata": {"labels": {"testlabel": "addedvalue" }}}'

# Old pod must be disposed
wait_pod_completion $old_pod

# new pod must be created
timeout 20m "./tests/wait-for-container.sh" elastic1-clientdatamaster

pod=$(kubectl get po -n $NAMESPACE -l testlabel=addedvalue -o name)

if [ -z "$pod" ]; then
echo "No pod found via label.."
exit 1
fi
echo "Pod successfully found via label"

kubectl delete -f deploy/cr.yaml
6 changes: 6 additions & 0 deletions tests/utils.sh
@@ -6,3 +6,9 @@ get_serviceaccount() {
kubectl -n $NAMESPACE get serviceaccount ${1} -o jsonpath='{.metadata.name}'
}

wait_pod_completion() {
while kubectl -n $NAMESPACE get po ${1} > /dev/null;
do
sleep 5
done
}
38 changes: 23 additions & 15 deletions tests/wait-for-container.sh
@@ -4,22 +4,26 @@ sleep 10s
while : ; do
checkIfEmpty=$(kubectl get po | awk '/elastic1-clientdatamaster-.*/{print $3}')
if [ -z "$checkIfEmpty" ]; then
echo "No pod found..."
exit 1
echo "No pod found... Actual pod status: $checkIfEmpty"
kubectl get all
echo "==== Operator logs ===="
pod=$(kubectl get po | awk '/elasticsearch-operator-.*/{print $1}')
kubectl logs -f $pod
exit 1
fi
if [ $checkIfEmpty = "Running" ]; then
echo "Elasticsearch started"
break
elif [ $checkIfEmpty = "ImagePullBackOff" -o $checkIfEmpty = "CrashLoopBackOff" ]; then
echo "Failed to deploy Elasticsearch"
pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')
kubectl logs -f $pod
exit 1
echo "Failed to deploy Elasticsearch"
pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')
kubectl logs -f $pod
exit 1
elif [ $checkIfEmpty = "Error" ]; then
echo "Failed to deploy Elasticsearch"
pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')
kubectl logs -f $pod
exit 1
echo "Failed to deploy Elasticsearch"
pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')
kubectl logs -f $pod
exit 1
else
echo "Waiting for Elasticsearch pod to spin up: ${checkIfEmpty}"
sleep 20s
@@ -29,16 +33,20 @@ done
while : ; do
checkIfEmpty=$(kubectl get po | awk '/elastic1-clientdatamaster-.*/{print $2}')
if [ -z "$checkIfEmpty" ]; then
echo "No pod found..."
exit 1
echo "No pod found... Actual pod status: $checkIfEmpty"
kubectl get all
echo "==== Operator logs ===="
pod=$(kubectl get po | awk '/elasticsearch-operator-.*/{print $1}')
kubectl logs -f $pod
exit 1
fi
if [ $checkIfEmpty = "1/1" ]; then
echo "Elasticsearch Deployed"
break
elif [ $checkIfEmpty = "ImagePullBackOff" -o $checkIfEmpty = "CrashLoopBackOff" ]; then
echo "Failed to deploy Elasticsearch"
pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')
kubectl logs -f $pod
echo "Failed to deploy Elasticsearch"
pod=$(kubectl get po | awk '/elastic1-.*/{print $1}')
kubectl logs -f $pod
exit 1
else
echo "Waiting for Elasticsearch pod to become ready: ${checkIfEmpty}"
30 changes: 30 additions & 0 deletions tests/wait-for-operator.sh
@@ -0,0 +1,30 @@
#!/bin/sh

kubectl create -f deploy/operator.yaml

while : ; do
checkIfEmpty=$(kubectl get po | awk '/elasticsearch-operator-.*/{print $3}')
if [ -z "$checkIfEmpty" ]; then
echo "No pod found... Actual pod status: $checkIfEmpty"
kubectl get all
exit 1
fi
if [ $checkIfEmpty = "Running" ]; then
echo "Operator started successfully"
break
elif [ $checkIfEmpty = "ImagePullBackOff" -o $checkIfEmpty = "CrashLoopBackOff" ]; then
echo "Failed to deploy Elasticsearch operator"
pod=$(kubectl get po | awk '/elasticsearch-operator-.*/{print $1}')
kubectl logs -f $pod
exit 1
elif [ $checkIfEmpty = "Error" ]; then
echo "Failed to deploy Elasticsearch operator"
pod=$(kubectl get po | awk '/elasticsearch-operator-.*/{print $1}')
kubectl logs -f $pod
exit 1
else
echo "Waiting for Elasticsearch operator pod to spin up: ${checkIfEmpty}"
sleep 20s
fi
done
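wait-for-operator.sh, wait-for-container.sh, and wait_pod_completion all share the same poll-and-branch shape: read a status column, succeed on the healthy value, dump logs and fail on known-bad values, otherwise sleep and retry. The same pattern as a generic Go helper (a sketch, not part of this PR):

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every interval until it reports done, returns an error,
// or the timeout elapses — the Go analogue of the shell loops above.
func waitFor(timeout, interval time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %s", timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	// Stand-in condition: "ready" one second after start.
	err := waitFor(5*time.Second, 200*time.Millisecond, func() (bool, error) {
		return time.Since(start) > time.Second, nil
	})
	fmt.Println("waitFor returned:", err) // <nil> on success
}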