diff --git a/.github/kind-config.yaml b/.github/kind-config.yaml index 7307f45f..c04e6fea 100644 --- a/.github/kind-config.yaml +++ b/.github/kind-config.yaml @@ -6,4 +6,4 @@ nodes: - containerPort: 30949 hostPort: 80 - containerPort: 30950 - hostPort: 443 \ No newline at end of file + hostPort: 443 diff --git a/.github/workflows/test-kubectl-plugin.yml b/.github/workflows/test-kubectl-plugin.yml index 7fce5ed0..a45e03e1 100644 --- a/.github/workflows/test-kubectl-plugin.yml +++ b/.github/workflows/test-kubectl-plugin.yml @@ -52,8 +52,8 @@ jobs: make generate manifests install - name: Install Istio run: | - curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.16.1 TARGET_ARCH=x86_64 sh - - export PATH="$PATH:$PWD/istio-1.16.1/bin" + curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.20.0 TARGET_ARCH=x86_64 sh - + export PATH="$PATH:$PWD/istio-1.20.0/bin" kubectl create namespace istio-system istioctl operator init @@ -129,6 +129,7 @@ jobs: run: | CLUSTER_IP=$(kubectl -n istio-system get svc istio-ingressgateway -o json | jq -r .spec.clusterIP) echo "CLUSTER_IP=${CLUSTER_IP}" + kubectl apply -f - <-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have the + same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + local object from a non-empty API group (non core + object) or a PersistentVolumeClaim object. When + this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. + This field will replace the functionality of the + DataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While + DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as + well as PersistentVolumeClaim objects. * While + DataSource ignores disallowed values (dropping + them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. 
+ type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. 
Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. 
+ format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' 
+ properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. 
Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + nullable: true + type: array required: - couchdb - discovery diff --git a/controllers/ca/ca_controller.go b/controllers/ca/ca_controller.go index 140b7409..2c90e72a 100644 --- a/controllers/ca/ca_controller.go +++ b/controllers/ca/ca_controller.go @@ -13,12 +13,13 @@ import ( "encoding/json" "encoding/pem" "fmt" - "github.com/go-logr/logr" "github.com/kfsoftware/hlf-operator/controllers/hlfmetrics" "github.com/kfsoftware/hlf-operator/pkg/status" "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/release" "k8s.io/kubernetes/pkg/api/v1/pod" + "sigs.k8s.io/controller-runtime/pkg/controller" "sort" "math/big" @@ -51,11 +52,14 @@ import ( // FabricCAReconciler reconciles a FabricCA object type FabricCAReconciler struct { client.Client - ChartPath string - Log logr.Logger - Scheme *runtime.Scheme - Config *rest.Config - ClientSet *kubernetes.Clientset + ChartPath string + Log logr.Logger + Scheme *runtime.Scheme + Config *rest.Config + ClientSet *kubernetes.Clientset + Wait bool + Timeout time.Duration + MaxHistory int } func parseECDSAPrivateKey(contents []byte) (*ecdsa.PrivateKey, error) { @@ -118,6 +122,24 @@ func getExistingSignCrypto(client *kubernetes.Clientset, chartName string, names return crt, key, nil } +func getAlreadyExistingCrypto(client *kubernetes.Clientset, secretName string, namespace string) (*x509.Certificate, *ecdsa.PrivateKey, error) { + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), secretName, v1.GetOptions{}) + if err != nil { + return nil, nil, err + } + tlsKeyData := secret.Data["keyfile"] + tlsCrtData := secret.Data["certfile"] + key, err := parseECDSAPrivateKey(tlsKeyData) + if err != nil { + return nil, nil, err + } + crt, err := parseX509Certificate(tlsCrtData) + if err != nil { + return nil, nil, err + } + return crt, key, nil +} + func getExistingSignTLSCrypto(client *kubernetes.Clientset, chartName string, namespace string) (*x509.Certificate, *ecdsa.PrivateKey, error) { secretName := fmt.Sprintf("%s--msp-tls-cryptomaterial", chartName) @@ -467,10 +489,15 @@ func GetConfig(conf *hlfv1alpha1.FabricCA, client *kubernetes.Clientset, chartNa } } } - + var caRef *SecretRef signCert, signKey, err := getExistingSignCrypto(client, chartName, namespace) if err != nil { - if conf.Spec.CA.CA != nil && conf.Spec.CA.CA.Key != "" && conf.Spec.CA.CA.Cert != "" { + if conf.Spec.CA.CA != nil && conf.Spec.CA.CA.SecretRef != nil && conf.Spec.CA.CA.SecretRef.Name != "" { + caRef = &SecretRef{ + SecretName: conf.Spec.CA.CA.SecretRef.Name, + } + err = nil + } else if conf.Spec.CA.CA != nil && conf.Spec.CA.CA.Key != "" && conf.Spec.CA.CA.Cert != "" { signCert, signKey, err = parseCrypto(conf.Spec.CA.CA.Key, conf.Spec.CA.CA.Cert) } else { signCert, signKey, err = CreateDefaultCA(spec.CA) @@ -479,9 +506,15 @@ func GetConfig(conf *hlfv1alpha1.FabricCA, client *kubernetes.Clientset, chartNa return nil, err } } + var caTLSSignRef 
*SecretRef caTLSSignCert, caTLSSignKey, err := getExistingSignTLSCrypto(client, chartName, namespace) if err != nil { - if conf.Spec.TLSCA.CA != nil && conf.Spec.TLSCA.CA.Key != "" && conf.Spec.TLSCA.CA.Cert != "" { + if conf.Spec.TLSCA.CA != nil && conf.Spec.TLSCA.CA.SecretRef != nil && conf.Spec.TLSCA.CA.SecretRef.Name != "" { + caTLSSignRef = &SecretRef{ + SecretName: conf.Spec.TLSCA.CA.SecretRef.Name, + } + err = nil + } else if conf.Spec.TLSCA.CA != nil && conf.Spec.TLSCA.CA.Key != "" && conf.Spec.TLSCA.CA.Cert != "" { caTLSSignCert, caTLSSignKey, err = parseCrypto(conf.Spec.TLSCA.CA.Key, conf.Spec.TLSCA.CA.Cert) } else { caTLSSignCert, caTLSSignKey, err = CreateDefaultCA(spec.TLSCA) @@ -502,32 +535,42 @@ func GetConfig(conf *hlfv1alpha1.FabricCA, client *kubernetes.Clientset, chartNa Type: "PRIVATE KEY", Bytes: tlsEncodedPK, }) - - signCRTEncoded := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: signCert.Raw, - }) - signEncodedPK, err := x509.MarshalPKCS8PrivateKey(signKey) - if err != nil { - return nil, err + var signCRTEncoded []byte + if signCert != nil { + signCRTEncoded = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: signCert.Raw, + }) } - signPEMEncodedPK := pem.EncodeToMemory(&pem.Block{ - Type: "PRIVATE KEY", - Bytes: signEncodedPK, - }) - - caTLSSignCRTEncoded := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: caTLSSignCert.Raw, - }) - caTLSSignEncodedPK, err := x509.MarshalPKCS8PrivateKey(caTLSSignKey) - if err != nil { - return nil, err + var signPEMEncodedPK []byte + if signKey != nil { + signEncodedPK, err := x509.MarshalPKCS8PrivateKey(signKey) + if err != nil { + return nil, err + } + signPEMEncodedPK = pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: signEncodedPK, + }) + } + var caTLSSignCRTEncoded []byte + if caTLSSignCert != nil { + caTLSSignCRTEncoded = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: caTLSSignCert.Raw, + }) + } + var caTLSSignPEMEncodedPK []byte + if caTLSSignKey != nil { + caTLSSignEncodedPK, err := x509.MarshalPKCS8PrivateKey(caTLSSignKey) + if err != nil { + return nil, err + } + caTLSSignPEMEncodedPK = pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: caTLSSignEncodedPK, + }) } - caTLSSignPEMEncodedPK := pem.EncodeToMemory(&pem.Block{ - Type: "PRIVATE KEY", - Bytes: caTLSSignEncodedPK, - }) istioPort := 443 if spec.Istio != nil && spec.Istio.Port != 0 { istioPort = spec.Istio.Port @@ -547,6 +590,8 @@ func GetConfig(conf *hlfv1alpha1.FabricCA, client *kubernetes.Clientset, chartNa gatewayApiNamespace = spec.GatewayApi.GatewayNamespace } msp := Msp{ + CARef: caRef, + TLSCARef: caTLSSignRef, Keyfile: string(signPEMEncodedPK), Certfile: string(signCRTEncoded), Chainfile: "", @@ -581,7 +626,26 @@ func GetConfig(conf *hlfv1alpha1.FabricCA, client *kubernetes.Clientset, chartNa } } + traefik := Traefik{} + if spec.Traefik != nil { + var middlewares []TraefikMiddleware + if spec.Traefik.Middlewares != nil { + for _, middleware := range spec.Traefik.Middlewares { + middlewares = append(middlewares, TraefikMiddleware{ + Name: middleware.Name, + Namespace: middleware.Namespace, + }) + } + } + traefik = Traefik{ + Entrypoints: spec.Traefik.Entrypoints, + Middlewares: middlewares, + Hosts: spec.Traefik.Hosts, + } + } var c = FabricCAChart{ + PodLabels: spec.PodLabels, + PodAnnotations: spec.PodAnnotations, ImagePullSecrets: spec.ImagePullSecrets, EnvVars: spec.Env, FullNameOverride: conf.Name, @@ -605,6 +669,7 @@ func GetConfig(conf *hlfv1alpha1.FabricCA, client 
*kubernetes.Clientset, chartNa Type: string(spec.Service.ServiceType), Port: 7054, }, + Traefik: traefik, Persistence: Persistence{ Enabled: true, Annotations: map[string]string{}, @@ -724,9 +789,17 @@ func GetCAState(clientSet *kubernetes.Clientset, ca *hlfv1alpha1.FabricCA, relea releaseName, ns, ) - signCrt, _, err := getExistingSignCrypto(clientSet, releaseName, ns) - if err != nil { - return nil, err + var signCrt *x509.Certificate + if ca.Spec.CA.CA != nil && ca.Spec.CA.CA.SecretRef != nil && ca.Spec.CA.CA.SecretRef.Name != "" { + signCrt, _, err = getAlreadyExistingCrypto(clientSet, ca.Spec.CA.CA.SecretRef.Name, ns) + if err != nil { + return nil, err + } + } else { + signCrt, _, err = getExistingSignCrypto(clientSet, releaseName, ns) + if err != nil { + return nil, err + } } r.CACert = string(utils.EncodeX509Certificate(signCrt)) hlfmetrics.UpdateCertificateExpiry( @@ -736,9 +809,17 @@ func GetCAState(clientSet *kubernetes.Clientset, ca *hlfv1alpha1.FabricCA, relea releaseName, ns, ) - tlsCACrt, _, err := getExistingSignTLSCrypto(clientSet, releaseName, ns) - if err != nil { - return nil, err + var tlsCACrt *x509.Certificate + if ca.Spec.TLSCA.CA != nil && ca.Spec.TLSCA.CA.SecretRef != nil && ca.Spec.TLSCA.CA.SecretRef.Name != "" { + tlsCACrt, _, err = getAlreadyExistingCrypto(clientSet, ca.Spec.TLSCA.CA.SecretRef.Name, ns) + if err != nil { + return nil, err + } + } else { + tlsCACrt, _, err = getExistingSignTLSCrypto(clientSet, releaseName, ns) + if err != nil { + return nil, err + } } r.TLSCACert = string(utils.EncodeX509Certificate(tlsCACrt)) hlfmetrics.UpdateCertificateExpiry( @@ -765,6 +846,8 @@ func (r *FabricCAReconciler) finalizeCA(reqLogger logr.Logger, m *hlfv1alpha1.Fa releaseName := m.Name reqLogger.Info("Successfully finalized ca") cmd := action.NewUninstall(cfg) + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout resp, err := cmd.Run(releaseName) if err != nil { if strings.Compare("Release not loaded", err.Error()) != 0 { @@ -832,13 +915,22 @@ func Reconcile( cmdStatus := action.NewStatus(cfg) exists := true - _, err = cmdStatus.Run(releaseName) + helmStatus, err := cmdStatus.Run(releaseName) if err != nil { if errors.Is(err, driver.ErrReleaseNotFound) { // it doesn't exists exists = false } else { - // it doesnt exist + // it doesn't exist + return ctrl.Result{}, err + } + } + if exists && helmStatus.Info.Status == release.StatusPendingUpgrade { + rollbackStatus := action.NewRollback(cfg) + rollbackStatus.Version = helmStatus.Version - 1 + err = rollbackStatus.Run(releaseName) + if err != nil { + // it doesn't exist return ctrl.Result{}, err } } @@ -850,6 +942,7 @@ func Reconcile( if exists { // update + log.Debugf("Release %s exists, updating", releaseName) s, err := GetCAState(r.ClientSet, hlf, releaseName, ns) if err != nil { return ctrl.Result{}, err @@ -890,8 +983,9 @@ func Reconcile( return ctrl.Result{}, err } cmd := action.NewUpgrade(cfg) - cmd.MaxHistory = 5 - cmd.Timeout = 5 * time.Minute + cmd.Timeout = r.Timeout + cmd.Wait = r.Wait + cmd.MaxHistory = r.MaxHistory settings := cli.New() chartPath, err := cmd.LocateChart(r.ChartPath, settings) ch, err := loader.Load(chartPath) @@ -930,6 +1024,8 @@ func Reconcile( return ctrl.Result{}, err } cmd.ReleaseName = name + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout ch, err := loader.Load(chart) if err != nil { return ctrl.Result{}, err @@ -1035,9 +1131,12 @@ func (r *FabricCAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } -func (r *FabricCAReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r 
*FabricCAReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int) error { return ctrl.NewControllerManagedBy(mgr). For(&hlfv1alpha1.FabricCA{}). Owns(&appsv1.Deployment{}). + WithOptions(controller.Options{ + MaxConcurrentReconciles: maxConcurrentReconciles, + }). Complete(r) } diff --git a/controllers/ca/types.go b/controllers/ca/types.go index 045c421f..72997421 100644 --- a/controllers/ca/types.go +++ b/controllers/ca/types.go @@ -3,7 +3,10 @@ package ca import corev1 "k8s.io/api/core/v1" type FabricCAChart struct { + PodLabels map[string]string `json:"podLabels"` + PodAnnotations map[string]string `json:"podAnnotations"` Istio Istio `json:"istio"` + Traefik Traefik `json:"traefik"` GatewayApi GatewayApi `json:"gatewayApi"` FullNameOverride string `json:"fullnameOverride"` Image Image `json:"image"` @@ -36,6 +39,15 @@ type ServiceMonitor struct { MetricRelabelings []interface{} `json:"metricRelabelings"` SampleLimit int `json:"sampleLimit"` } +type TraefikMiddleware struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} +type Traefik struct { + Entrypoints []string `json:"entryPoints"` + Middlewares []TraefikMiddleware `json:"middlewares"` + Hosts []string `json:"hosts"` +} type Istio struct { Port int `json:"port"` Hosts []string `json:"hosts"` @@ -179,7 +191,13 @@ type Persistence struct { AccessMode string `json:"accessMode"` Size string `json:"size"` } +type SecretRef struct { + SecretName string `json:"secretName"` +} type Msp struct { + CARef *SecretRef `json:"caRef"` + TLSCARef *SecretRef `json:"tlsCARef"` + Keyfile string `json:"keyfile"` Certfile string `json:"certfile"` Chainfile string `json:"chainfile"` diff --git a/controllers/certs/provision_certs.go b/controllers/certs/provision_certs.go index 99099c8b..2d2b96a4 100644 --- a/controllers/certs/provision_certs.go +++ b/controllers/certs/provision_certs.go @@ -365,82 +365,4 @@ func GetClient(ca FabricCAParams) (*lib.Client, error) { return nil, err } return client, err - //m1 := &mockIsSecurityEnabled{} - //m2 := &mockSecurityAlgorithm{} - //m3 := &mockSecurityLevel{} - //m4 := &mockSecurityProvider{} - //m5 := &mockSoftVerify{} - //m6 := &mockSecurityProviderLibPath{} - //m7 := &mockSecurityProviderPin{} - //m8 := &mockSecurityProviderLabel{} - //m9 := &mockKeyStorePath{ - // Path: keyStorePath, - //} - //mspID := ca.MSPID - //fabricConfig, err := getFabricConfig(ca) - //if err != nil { - // return nil, nil, nil, nil, err - //} - //configYaml, err := yaml.Marshal(fabricConfig) - //if err != nil { - // return nil, nil, nil, nil, err - //} - //configBackend, err := config.FromRaw(configYaml, "yaml")() - //if err != nil { - // return nil, nil, nil, nil, err - //} - //cryptSuiteConfig2 := cryptosuite.ConfigFromBackend(configBackend...) - //cryptSuiteConfigOption, err := cryptosuite.BuildCryptoSuiteConfigFromOptions( - // m1, m2, m3, m4, m5, m6, m7, m8, m9, - //) - //if err != nil { - // return nil, nil, nil, nil, err - //} - //cryptSuiteConfig1, ok := cryptSuiteConfigOption.(*cryptosuite.CryptoConfigOptions) - //if !ok { - // return nil, nil, nil, nil, errors.New(fmt.Sprintf("BuildCryptoSuiteConfigFromOptions did not return an Options instance %T", cryptSuiteConfigOption)) - //} - //cryptSuiteConfig := cryptosuite.UpdateMissingOptsWithDefaultConfig(cryptSuiteConfig1, cryptSuiteConfig2) - // - //endpointConfig, err := fabImpl.ConfigFromBackend(configBackend...) - //if err != nil { - // return nil, nil, nil, nil, err - //} - //identityConfig, err := msp.ConfigFromBackend(configBackend...) 
- //if err != nil { - // return nil, nil, nil, nil, err - //} - //cryptoSuite, err := sw.GetSuiteByConfig(cryptSuiteConfig) - //if err != nil { - // return nil, nil, nil, nil, err - //} - //userStore := msp.NewMemoryUserStore() - //identityManagers := make(map[string]mspprov.IdentityManager) - //netConfig := endpointConfig.NetworkConfig() - //if netConfig == nil { - // panic("failed to get network config") - //} - //for orgName := range netConfig.Organizations { - // mgr, err1 := msp.NewIdentityManager(orgName, userStore, cryptoSuite, endpointConfig) - // if err1 != nil { - // panic(fmt.Sprintf("failed to initialize identity manager for organization: %s, cause :%s", orgName, err1)) - // } - // identityManagers[orgName] = mgr - //} - // - //identityManagerProvider := &identityManagerProvider{identityManager: identityManagers} - //ctxProvider := fabricctx.NewProvider( - // fabricctx.WithIdentityManagerProvider(identityManagerProvider), - // fabricctx.WithUserStore(userStore), - // fabricctx.WithCryptoSuite(cryptoSuite), - // //fabricctx.WithCryptoSuiteConfig(cryptSuiteConfig), - // fabricctx.WithEndpointConfig(endpointConfig), - // fabricctx.WithIdentityConfig(identityConfig), - //) - //fctx := &fabricctx.Client{Providers: ctxProvider} - //client, err := msp.NewCAClient(mspID, fctx) - //if err != nil { - // return nil, nil, nil, nil, err - //} - //return client, userStore, identityManagers, cryptoSuite, nil } diff --git a/controllers/chaincode/chaincode_controller.go b/controllers/chaincode/chaincode_controller.go index 20d639ac..e76ecca8 100644 --- a/controllers/chaincode/chaincode_controller.go +++ b/controllers/chaincode/chaincode_controller.go @@ -453,11 +453,16 @@ func (r *FabricChaincodeReconciler) Reconcile(ctx context.Context, req ctrl.Requ Tolerations: fabricChaincode.Spec.Tolerations, } replicas := fabricChaincode.Spec.Replicas + podLabels := labels + for key, value := range fabricChaincode.Spec.PodLabels { + podLabels[key] = value + } appv1Deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: ns, - Labels: labels, + Name: deploymentName, + Namespace: ns, + Labels: labels, + Annotations: fabricChaincode.Spec.Annotations, }, Spec: appsv1.DeploymentSpec{ Replicas: func(i int32) *int32 { return &i }(int32(replicas)), @@ -466,7 +471,8 @@ func (r *FabricChaincodeReconciler) Reconcile(ctx context.Context, req ctrl.Requ }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: labels, + Labels: podLabels, + Annotations: fabricChaincode.Spec.PodAnnotations, }, Spec: podSpec, }, @@ -491,9 +497,10 @@ func (r *FabricChaincodeReconciler) Reconcile(ctx context.Context, req ctrl.Requ r.setConditionStatus(ctx, fabricChaincode, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricChaincode) } + } else { + r.setConditionStatus(ctx, fabricChaincode, hlfv1alpha1.FailedStatus, false, err, false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricChaincode) } - r.setConditionStatus(ctx, fabricChaincode, hlfv1alpha1.FailedStatus, false, err, false) - return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricChaincode) } else { deployment.Spec = appv1Deployment.Spec if cryptoData.Updated { @@ -568,7 +575,7 @@ func (r *FabricChaincodeReconciler) Reconcile(ctx context.Context, req ctrl.Requ } } r.setConditionStatus(ctx, fabricChaincode, hlfv1alpha1.RunningStatus, true, nil, false) - return ctrl.Result{}, nil + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricChaincode) } var ( 
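For reference, a minimal FabricChaincode manifest exercising the new metadata fields added in the hunk above might look like the sketch below. This is illustrative only: the field names (podLabels, podAnnotations, annotations) are assumed from the Go spec fields visible in this diff, and the authoritative names are the JSON tags in the generated CRD.

apiVersion: hlf.kungfusoftware.es/v1alpha1
kind: FabricChaincode
metadata:
  name: asset-transfer            # hypothetical name, for illustration only
spec:
  replicas: 1
  podLabels:                      # merged into the pod template labels on top of the selector labels
    app.kubernetes.io/part-of: fabric
  podAnnotations:                 # set on the pod template metadata
    prometheus.io/scrape: "true"
  annotations:                    # set on the Deployment object itself
    example.com/owner: org1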
diff --git a/controllers/console/console_controller.go b/controllers/console/console_controller.go index f258a2e8..72166cc3 100644 --- a/controllers/console/console_controller.go +++ b/controllers/console/console_controller.go @@ -391,7 +391,7 @@ func (r *FabricOperationsConsoleReconciler) upgradeChart( if err != nil { return err } - cmd.Wait = true + cmd.Wait = false cmd.Timeout = time.Minute * 5 release, err := cmd.Run(releaseName, ch, inInterface) if err != nil { diff --git a/controllers/followerchannel/followerchannel_controller.go b/controllers/followerchannel/followerchannel_controller.go index a7560a31..47253812 100644 --- a/controllers/followerchannel/followerchannel_controller.go +++ b/controllers/followerchannel/followerchannel_controller.go @@ -134,6 +134,7 @@ func (r *FabricFollowerChannelReconciler) Reconcile(ctx context.Context, req ctr r.setConditionStatus(ctx, fabricFollowerChannel, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricFollowerChannel) } + defer sdk.Close() idConfig := fabricFollowerChannel.Spec.HLFIdentity secret, err := clientSet.CoreV1().Secrets(idConfig.SecretNamespace).Get(ctx, idConfig.SecretName, v1.GetOptions{}) if err != nil { @@ -233,6 +234,13 @@ func (r *FabricFollowerChannelReconciler) Reconcile(ctx context.Context, req ctr r.setConditionStatus(ctx, fabricFollowerChannel, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricFollowerChannel) } + var buf2 bytes.Buffer + err = protolator.DeepMarshalJSON(&buf2, cfgBlock) + if err != nil { + r.setConditionStatus(ctx, fabricFollowerChannel, hlfv1alpha1.FailedStatus, false, errors.Wrapf(err, "error converting block to JSON"), false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricFollowerChannel) + } + log.Infof("Config block: %s", buf2.Bytes()) cftxGen := configtx.New(cfgBlock) app := cftxGen.Application().Organization(mspID) anchorPeers, err := app.AnchorPeers() @@ -352,15 +360,14 @@ func (r *FabricFollowerChannelReconciler) Reconcile(ctx context.Context, req ctr fabricFollowerChannel.Status.Status = hlfv1alpha1.RunningStatus fabricFollowerChannel.Status.Message = "Peers and anchor peers completed" fabricFollowerChannel.Status.Conditions.SetCondition(status.Condition{ - Type: "CREATED", - Status: "True", - LastTransitionTime: v1.Time{}, + Type: status.ConditionType(fabricFollowerChannel.Status.Status), + Status: "True", }) if err := r.Status().Update(ctx, fabricFollowerChannel); err != nil { r.setConditionStatus(ctx, fabricFollowerChannel, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricFollowerChannel) } - return ctrl.Result{}, nil + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricFollowerChannel) } var ( diff --git a/controllers/identity/identity_controller.go b/controllers/identity/identity_controller.go index bcd1a6d9..6b476b59 100644 --- a/controllers/identity/identity_controller.go +++ b/controllers/identity/identity_controller.go @@ -22,6 +22,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -52,7 +53,25 @@ func (r *FabricIdentityReconciler) finalizeMainChannel(reqLogger logr.Logger, m return nil } - +func getCertBytesFromCATLS(client *kubernetes.Clientset, caTls hlfv1alpha1.Catls) ([]byte, error) { + 
var signCertBytes []byte + var err error + if caTls.Cacert != "" { + signCertBytes, err = base64.StdEncoding.DecodeString(caTls.Cacert) + if err != nil { + return nil, err + } + } else if caTls.SecretRef != nil { + secret, err := client.CoreV1().Secrets(caTls.SecretRef.Namespace).Get(context.Background(), caTls.SecretRef.Name, v1.GetOptions{}) + if err != nil { + return nil, err + } + signCertBytes = secret.Data[caTls.SecretRef.Key] + } else { + return nil, errors.New("invalid ca tls") + } + return signCertBytes, nil +} func (r *FabricIdentityReconciler) addFinalizer(reqLogger logr.Logger, m *hlfv1alpha1.FabricIdentity) error { reqLogger.Info("Adding Finalizer for the MainChannel") controllerutil.AddFinalizer(m, identityFinalizer) @@ -71,6 +90,7 @@ func (r *FabricIdentityReconciler) addFinalizer(reqLogger logr.Logger, m *hlfv1a // +kubebuilder:rbac:groups=hlf.kungfusoftware.es,resources=fabricidentities/finalizers,verbs=get;update;patch func (r *FabricIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLogger := r.Log.WithValues("hlf", req.NamespacedName) + reqLogger.Info("Reconciling FabricIdentity") fabricIdentity := &hlfv1alpha1.FabricIdentity{} err := r.Get(ctx, req.NamespacedName, fabricIdentity) @@ -107,7 +127,7 @@ func (r *FabricIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Reque r.setConditionStatus(ctx, fabricIdentity, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricIdentity) } - tlsCert, err := base64.StdEncoding.DecodeString(fabricIdentity.Spec.Catls.Cacert) + tlsCert, err := getCertBytesFromCATLS(clientSet, fabricIdentity.Spec.Catls) if err != nil { r.setConditionStatus(ctx, fabricIdentity, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricIdentity) @@ -126,6 +146,29 @@ func (r *FabricIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Reque var x509Cert *x509.Certificate var pk *ecdsa.PrivateKey var rootCert *x509.Certificate + if fabricIdentity.Spec.Register != nil { + log.Infof("Registering user %s", fabricIdentity.Spec.Enrollid) + _, err = certs.RegisterUser(certs.RegisterUserRequest{ + TLSCert: string(tlsCert), + URL: fmt.Sprintf("https://%s:%d", fabricIdentity.Spec.Cahost, fabricIdentity.Spec.Caport), + Name: fabricIdentity.Spec.Caname, + MSPID: fabricIdentity.Spec.MSPID, + EnrollID: fabricIdentity.Spec.Register.Enrollid, + EnrollSecret: fabricIdentity.Spec.Register.Enrollsecret, + User: fabricIdentity.Spec.Enrollid, + Secret: fabricIdentity.Spec.Enrollsecret, + Type: fabricIdentity.Spec.Register.Type, + Attributes: []api.Attribute{}, + }) + if err != nil { + if !strings.Contains(err.Error(), "already registered") { + log.Errorf("Error registering user: %v", err) + r.setConditionStatus(ctx, fabricIdentity, hlfv1alpha1.FailedStatus, false, err, false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricIdentity) + } + } + } + if secretExists { // get crypto material from secret certPemBytes := secret.Data["cert.pem"] @@ -251,7 +294,7 @@ func (r *FabricIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Reque fabricIdentity.Status.Status = hlfv1alpha1.RunningStatus fabricIdentity.Status.Message = "Identity Setup" fabricIdentity.Status.Conditions.SetCondition(status.Condition{ - Type: "CREATED", + Type: status.ConditionType(fabricIdentity.Status.Status), Status: "True", LastTransitionTime: v1.Time{}, }) @@ -260,7 +303,7 @@ func (r *FabricIdentityReconciler) Reconcile(ctx context.Context, req 
ctrl.Reque return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricIdentity) } return ctrl.Result{ - RequeueAfter: 60 * time.Second, + RequeueAfter: 10 * 60 * time.Second, }, nil } diff --git a/controllers/mainchannel/mainchannel_controller.go b/controllers/mainchannel/mainchannel_controller.go index 19a2a1f6..ac62979b 100644 --- a/controllers/mainchannel/mainchannel_controller.go +++ b/controllers/mainchannel/mainchannel_controller.go @@ -144,6 +144,7 @@ func (r *FabricMainChannelReconciler) Reconcile(ctx context.Context, req ctrl.Re r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, err, false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) } + defer sdk.Close() firstAdminOrgMSPID := fabricMainChannel.Spec.AdminPeerOrganizations[0].MSPID idConfig, ok := fabricMainChannel.Spec.Identities[firstAdminOrgMSPID] if !ok { @@ -267,10 +268,15 @@ func (r *FabricMainChannelReconciler) Reconcile(ctx context.Context, req ctrl.Re r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, fmt.Errorf("couldn't append certs from org %s", ordererOrg.MSPID), false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) } - idConfig, ok := fabricMainChannel.Spec.Identities[ordererOrg.MSPID] + idConfig, ok := fabricMainChannel.Spec.Identities[fmt.Sprintf("%s-tls", ordererOrg.MSPID)] if !ok { - r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, fmt.Errorf("identity not found for MSPID %s", ordererOrg.MSPID), false) - return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) + log.Infof("Identity for MSPID %s not found, trying with normal identity", fmt.Sprintf("%s-tls", ordererOrg.MSPID)) + // try with normal identity + idConfig, ok = fabricMainChannel.Spec.Identities[ordererOrg.MSPID] + if !ok { + r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, fmt.Errorf("identity not found for MSPID %s", ordererOrg.MSPID), false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) + } } secret, err := clientSet.CoreV1().Secrets(idConfig.SecretNamespace).Get(ctx, idConfig.SecretName, v1.GetOptions{}) if err != nil { @@ -430,8 +436,8 @@ func (r *FabricMainChannelReconciler) Reconcile(ctx context.Context, req ctrl.Re r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, errors.Wrapf(err, "error converting block to JSON"), false) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) } - r.Log.Info(fmt.Sprintf("Config block main channel: %s", buf2.String())) - r.Log.Info(fmt.Sprintf("ConfigTX: %v", newConfigTx)) + log.Debug(fmt.Sprintf("Config block main channel: %s", buf2.String())) + log.Debug(fmt.Sprintf("ConfigTX: %v", newConfigTx)) err = updateApplicationChannelConfigTx(currentConfigTx, newConfigTx) if err != nil { r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, errors.Wrapf(err, "failed to update application channel config"), false) @@ -736,18 +742,15 @@ func (r *FabricMainChannelReconciler) Reconcile(ctx context.Context, req ctrl.Re fabricMainChannel.Status.Message = "Channel setup completed" fabricMainChannel.Status.Conditions.SetCondition(status.Condition{ - Type: "CREATED", - Status: "True", - LastTransitionTime: v1.Time{}, + Type: status.ConditionType(fabricMainChannel.Status.Status), + Status: "True", }) if err := r.Status().Update(ctx, fabricMainChannel); err != nil { r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.FailedStatus, false, err, false) return 
r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) } - return ctrl.Result{ - Requeue: false, - RequeueAfter: 0, - }, nil + r.setConditionStatus(ctx, fabricMainChannel, hlfv1alpha1.RunningStatus, true, nil, false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricMainChannel) } var ( @@ -1249,6 +1252,10 @@ func updateApplicationChannelConfigTx(currentConfigTX configtx.ConfigTx, newConf if err != nil { return errors.Wrapf(err, "failed to set ACLs") } + err = currentConfigTX.Orderer().SetBatchTimeout(newConfigTx.Orderer.BatchTimeout) + if err != nil { + return errors.Wrapf(err, "failed to set batch timeout") + } return nil } func updateOrdererChannelConfigTx(currentConfigTX configtx.ConfigTx, newConfigTx configtx.Channel) error { @@ -1371,6 +1378,24 @@ func updateOrdererChannelConfigTx(currentConfigTX configtx.ConfigTx, newConfigTx } } + err = currentConfigTX.Orderer().BatchSize().SetMaxMessageCount( + newConfigTx.Orderer.BatchSize.MaxMessageCount, + ) + if err != nil { + return errors.Wrapf(err, "failed to set max message count") + } + err = currentConfigTX.Orderer().BatchSize().SetAbsoluteMaxBytes( + newConfigTx.Orderer.BatchSize.AbsoluteMaxBytes, + ) + if err != nil { + return errors.Wrapf(err, "failed to set absolute max bytes") + } + err = currentConfigTX.Orderer().BatchSize().SetPreferredMaxBytes( + newConfigTx.Orderer.BatchSize.PreferredMaxBytes, + ) + if err != nil { + return errors.Wrapf(err, "failed to set preferred max bytes") + } err = currentConfigTX.Orderer().SetPolicies( newConfigTx.Orderer.Policies, ) diff --git a/controllers/networkconfig/networkconfig_controller.go b/controllers/networkconfig/networkconfig_controller.go index 6ab44354..f0eda2d6 100644 --- a/controllers/networkconfig/networkconfig_controller.go +++ b/controllers/networkconfig/networkconfig_controller.go @@ -77,11 +77,14 @@ organizations: {{- range $orderer := $org.OrdererNodes }} - {{ $orderer.Name }} {{- end }} + {{- range $orderer := $.ExternalOrderers }} + - {{ $orderer.Name }} + {{- end }} {{- end }} {{- end }} {{- end }} -{{- if not .Orderers }} -orderers: [] +{{ if and (empty .Orderers) (empty .ExternalOrderers) }} +orderers: {} {{- else }} orderers: {{- range $orderer := .Orderers }} @@ -97,10 +100,21 @@ orderers: pem: | {{ or $orderer.Status.TlsCACert $orderer.Status.TlsCert | indent 8 }} {{- end }} + +{{- range $orderer := .ExternalOrderers }} + {{$orderer.Name}}: + url: {{ $orderer.URL }} + grpcOptions: + allow-insecure: false + tlsCACerts: + pem: | +{{ or $orderer.TLSCACert | indent 8 }} +{{- end }} + {{- end }} -{{- if not .Peers }} -peers: [] +{{ if and (empty .Peers) (empty .ExternalPeers) }} +peers: {} {{- else }} peers: {{- range $peer := .Peers }} @@ -116,6 +130,17 @@ peers: pem: | {{ $peer.Status.TlsCACert | indent 8 }} {{- end }} + +{{- range $peer := .ExternalPeers }} + {{$peer.Name}}: + url: {{ $peer.URL }} + grpcOptions: + allow-insecure: false + tlsCACerts: + pem: | +{{ $peer.TLSCACert | indent 8 }} +{{- end }} + {{- end }} {{- if not .CertAuths }} @@ -133,7 +158,7 @@ certificateAuthorities: {{if $ca.EnrollID }} registrar: enrollId: {{ $ca.EnrollID }} - enrollSecret: {{ $ca.EnrollPWD }} + enrollSecret: "{{ $ca.EnrollPWD }}" {{ end }} caName: ca tlsCACerts: @@ -148,15 +173,18 @@ certificateAuthorities: channels: {{- range $channel := .Channels }} {{ $channel }}: -{{- if not $.Orderers }} +{{ if and (empty $.Orderers) (empty $.ExternalOrderers) }} orderers: [] {{- else }} orderers: {{- range $orderer := $.Orderers }} - {{$orderer.Name}} {{- end }} +{{- range $orderer 
:= $.ExternalOrderers }} + - {{$orderer.Name}} +{{- end }} {{- end }} -{{- if not $.Peers }} +{{ if and (empty $.Peers) (empty $.ExternalPeers) }} peers: {} {{- else }} peers: @@ -168,6 +196,16 @@ channels: ledgerQuery: true eventSource: true {{- end }} + +{{- range $peer := $.ExternalPeers }} + {{$peer.Name}}: + discover: true + endorsingPeer: true + chaincodeQuery: true + ledgerQuery: true + eventSource: true +{{- end }} + {{- end }} {{- end }} @@ -236,11 +274,6 @@ func (r *FabricNetworkConfigReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, err } } - tmpl, err := template.New("networkConfig").Funcs(sprig.HermeticTxtFuncMap()).Parse(tmplGoConfig) - if err != nil { - r.setConditionStatus(ctx, fabricNetworkConfig, hlfv1alpha1.FailedStatus, false, err, false) - return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricNetworkConfig) - } hlfClientSet, err := operatorv1.NewForConfig(r.Config) if err != nil { r.setConditionStatus(ctx, fabricNetworkConfig, hlfv1alpha1.FailedStatus, false, err, false) @@ -280,6 +313,16 @@ func (r *FabricNetworkConfigReconciler) Reconcile(ctx context.Context, req ctrl. } for _, v := range peerOrgs { if (filterByOrgs && utils.Contains(fabricNetworkConfig.Spec.Organizations, v.MspID)) || !filterByOrgs { + var peers []*helpers.ClusterPeer + for _, peer := range v.Peers { + if filterByNS && !utils.Contains(fabricNetworkConfig.Spec.Namespaces, peer.Namespace) { + continue + } + if (filterByOrgs && utils.Contains(fabricNetworkConfig.Spec.Organizations, peer.MSPID)) || !filterByOrgs { + peers = append(peers, peer) + } + } + v.Peers = peers orgMap[v.MspID] = v } } @@ -363,14 +406,21 @@ func (r *FabricNetworkConfigReconciler) Reconcile(ctx context.Context, req ctrl. } } + tmpl, err := template.New("networkConfig").Funcs(sprig.HermeticTxtFuncMap()).Parse(tmplGoConfig) + if err != nil { + r.setConditionStatus(ctx, fabricNetworkConfig, hlfv1alpha1.FailedStatus, false, err, false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricNetworkConfig) + } err = tmpl.Execute(&buf, map[string]interface{}{ - "Peers": peers, - "Orderers": orderers, - "Organizations": orgMap, - "Channels": fabricNetworkConfig.Spec.Channels, - "CertAuths": certAuths, - "Organization": fabricNetworkConfig.Spec.Organization, - "Internal": fabricNetworkConfig.Spec.Internal, + "Peers": peers, + "Orderers": orderers, + "ExternalPeers": fabricNetworkConfig.Spec.ExternalPeers, + "ExternalOrderers": fabricNetworkConfig.Spec.ExternalOrderers, + "Organizations": orgMap, + "Channels": fabricNetworkConfig.Spec.Channels, + "CertAuths": certAuths, + "Organization": fabricNetworkConfig.Spec.Organization, + "Internal": fabricNetworkConfig.Spec.Internal, }) if err != nil { r.setConditionStatus(ctx, fabricNetworkConfig, hlfv1alpha1.FailedStatus, false, err, false) @@ -421,13 +471,16 @@ func (r *FabricNetworkConfigReconciler) Reconcile(ctx context.Context, req ctrl. 
r.setConditionStatus(ctx, fabricNetworkConfig, hlfv1alpha1.RunningStatus, true, nil, false) fca := fabricNetworkConfig.DeepCopy() fca.Status.Status = hlfv1alpha1.RunningStatus + fca.Status.Conditions.SetCondition(status.Condition{ + Type: status.ConditionType(fca.Status.Status), + Status: "True", + }) if err := r.Status().Update(ctx, fca); err != nil { log.Error(err, fmt.Sprintf("%v failed to update the application status", ErrClientK8s)) return reconcile.Result{}, err } - return ctrl.Result{ - RequeueAfter: 1 * time.Minute, - }, nil + r.setConditionStatus(ctx, fabricNetworkConfig, hlfv1alpha1.RunningStatus, true, nil, false) + return r.updateCRStatusOrFailReconcileWithRequeue(ctx, r.Log, fabricNetworkConfig, 1*time.Minute) } var ( @@ -442,6 +495,16 @@ func (r *FabricNetworkConfigReconciler) updateCRStatusOrFailReconcile(ctx contex } return reconcile.Result{}, nil } +func (r *FabricNetworkConfigReconciler) updateCRStatusOrFailReconcileWithRequeue(ctx context.Context, log logr.Logger, p *hlfv1alpha1.FabricNetworkConfig, requeueAfter time.Duration) ( + reconcile.Result, error) { + if err := r.Status().Update(ctx, p); err != nil { + log.Error(err, fmt.Sprintf("%v failed to update the application status", ErrClientK8s)) + return reconcile.Result{}, err + } + return reconcile.Result{ + RequeueAfter: requeueAfter, + }, nil +} func (r *FabricNetworkConfigReconciler) setConditionStatus(ctx context.Context, p *hlfv1alpha1.FabricNetworkConfig, conditionType hlfv1alpha1.DeploymentStatus, statusFlag bool, err error, statusUnknown bool) (update bool) { statusStr := func() corev1.ConditionStatus { diff --git a/controllers/operatorapi/operatorapi.go b/controllers/operatorapi/operatorapi.go index dd50aa2e..9ae1a8d2 100644 --- a/controllers/operatorapi/operatorapi.go +++ b/controllers/operatorapi/operatorapi.go @@ -396,7 +396,7 @@ func (r *FabricOperatorAPIReconciler) upgradeChart( if err != nil { return err } - cmd.Wait = true + cmd.Wait = false cmd.Timeout = time.Minute * 5 release, err := cmd.Run(releaseName, ch, inInterface) if err != nil { diff --git a/controllers/operatorui/operatorui.go b/controllers/operatorui/operatorui.go index ff5824d3..721d6502 100644 --- a/controllers/operatorui/operatorui.go +++ b/controllers/operatorui/operatorui.go @@ -391,7 +391,7 @@ func (r *FabricOperatorUIReconciler) upgradeChart( if err != nil { return err } - cmd.Wait = true + cmd.Wait = false cmd.Timeout = time.Minute * 5 release, err := cmd.Run(releaseName, ch, inInterface) if err != nil { diff --git a/controllers/ordnode/ordnode_controller.go b/controllers/ordnode/ordnode_controller.go index 0a840d00..529558e6 100644 --- a/controllers/ordnode/ordnode_controller.go +++ b/controllers/ordnode/ordnode_controller.go @@ -8,6 +8,7 @@ import ( "encoding/json" "encoding/pem" "fmt" + "helm.sh/helm/v3/pkg/release" "os" "reflect" "strings" @@ -51,6 +52,9 @@ type FabricOrdererNodeReconciler struct { Config *rest.Config AutoRenewCertificates bool AutoRenewCertificatesDelta time.Duration + Wait bool + Timeout time.Duration + MaxHistory int } const ordererNodeFinalizer = "finalizer.orderernode.hlf.kungfusoftware.es" @@ -67,6 +71,8 @@ func (r *FabricOrdererNodeReconciler) finalizeOrderer(reqLogger logr.Logger, m * releaseName := m.Name reqLogger.Info("Successfully finalized orderer") cmd := action.NewUninstall(cfg) + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout resp, err := cmd.Run(releaseName) if err != nil { if strings.Compare("Release not loaded", err.Error()) != 0 { @@ -138,13 +144,22 @@ func (r *FabricOrdererNodeReconciler) 
Reconcile(ctx context.Context, req ctrl.Re } cmdStatus := action.NewStatus(cfg) exists := true - _, err = cmdStatus.Run(releaseName) + helmStatus, err := cmdStatus.Run(releaseName) if err != nil { if errors.Is(err, driver.ErrReleaseNotFound) { // it doesn't exists exists = false } else { - // it doesnt exist + // it doesn't exist + return ctrl.Result{}, err + } + } + if exists && helmStatus.Info.Status == release.StatusPendingUpgrade { + rollbackStatus := action.NewRollback(cfg) + rollbackStatus.Version = helmStatus.Version - 1 + err = rollbackStatus.Run(releaseName) + if err != nil { + // it doesn't exist return ctrl.Result{}, err } } @@ -268,6 +283,9 @@ func (r *FabricOrdererNodeReconciler) Reconcile(ctx context.Context, req ctrl.Re } } else { cmd := action.NewInstall(cfg) + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout + cmd.ReleaseName = releaseName name, chart, err := cmd.NameAndChart([]string{releaseName, r.ChartPath}) if err != nil { return ctrl.Result{}, err @@ -313,6 +331,11 @@ func (r *FabricOrdererNodeReconciler) Reconcile(ctx context.Context, req ctrl.Re Status: "True", LastTransitionTime: v1.Time{}, }) + err = r.Get(ctx, req.NamespacedName, fabricOrdererNode) + if err != nil { + reqLogger.Error(err, "Failed to get Orderer before updating it.") + return ctrl.Result{}, err + } if err := r.Status().Update(ctx, fabricOrdererNode); err != nil { return ctrl.Result{}, err } @@ -381,12 +404,12 @@ func (r *FabricOrdererNodeReconciler) setConditionStatus( return p.Status.Conditions.SetCondition(condition()) } -func (r *FabricOrdererNodeReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *FabricOrdererNodeReconciler) SetupWithManager(mgr ctrl.Manager, maxReconciles int) error { return ctrl.NewControllerManagedBy(mgr). For(&hlfv1alpha1.FabricOrdererNode{}). Owns(&appsv1.Deployment{}). WithOptions(controller.Options{ - MaxConcurrentReconciles: 10, + MaxConcurrentReconciles: maxReconciles, }). 
Complete(r) } @@ -439,7 +462,6 @@ func (r *FabricOrdererNodeReconciler) upgradeChart( return err } cmd := action.NewUpgrade(cfg) - cmd.MaxHistory = 5 err = os.Setenv("HELM_NAMESPACE", ns) if err != nil { return err @@ -453,8 +475,9 @@ func (r *FabricOrdererNodeReconciler) upgradeChart( if err != nil { return err } - cmd.Wait = true - cmd.Timeout = time.Minute * 5 + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout + cmd.MaxHistory = r.MaxHistory release, err := cmd.Run(releaseName, ch, inInterface) if err != nil { return err @@ -758,6 +781,26 @@ func ReenrollSignCryptoMaterial( } return signCert, privateKey, signRootCert, nil } + +func getCertBytesFromCATLS(client *kubernetes.Clientset, caTls hlfv1alpha1.Catls) ([]byte, error) { + var signCertBytes []byte + var err error + if caTls.Cacert != "" { + signCertBytes, err = base64.StdEncoding.DecodeString(caTls.Cacert) + if err != nil { + return nil, err + } + } else if caTls.SecretRef != nil { + secret, err := client.CoreV1().Secrets(caTls.SecretRef.Namespace).Get(context.Background(), caTls.SecretRef.Name, v1.GetOptions{}) + if err != nil { + return nil, err + } + signCertBytes = secret.Data[caTls.SecretRef.Key] + } else { + return nil, errors.New("invalid ca tls") + } + return signCertBytes, nil +} func getConfig( conf *hlfv1alpha1.FabricOrdererNode, client *kubernetes.Clientset, @@ -794,7 +837,7 @@ func getConfig( return nil, errors.Wrapf(err, "failed to parse tls private key") } } else if refreshCerts { - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode tls ca cert") } @@ -820,7 +863,7 @@ func getConfig( tlsCert, tlsKey, tlsRootCert, err = getExistingTLSCrypto(client, chartName, namespace) if err != nil { log.Warnf("Failed to get existing tls crypto material for %s, will create new one", chartName) - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode tls ca cert") } @@ -839,7 +882,7 @@ func getConfig( } } if refreshCerts { - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode tls ca cert") } @@ -864,7 +907,7 @@ func getConfig( adminCert, adminKey, adminRootCert, adminClientRootCert, err = getExistingTLSAdminCrypto(client, chartName, namespace) if err != nil { log.Warnf("Failed to get existing tls admin crypto material, creating new one") - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode tls ca cert") } @@ -902,7 +945,7 @@ func getConfig( return nil, errors.Wrapf(err, "failed to parse sign private key") } } else if refreshCerts { - cacert, err := base64.StdEncoding.DecodeString(signParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, signParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode sign ca cert") } @@ -928,7 +971,7 @@ func getConfig( signCert, signKey, signRootCert, err = getExistingSignCrypto(client, chartName, namespace) if err != nil { log.Warnf("Failed to get existing sign crypto material: %s", err) - cacert, err := base64.StdEncoding.DecodeString(signParams.Catls.Cacert) + cacert, err := 
getCertBytesFromCATLS(client, signParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode sign ca cert") } @@ -1037,6 +1080,43 @@ func getConfig( GatewayNamespace: "", } } + + traefik := Traefik{} + if spec.Traefik != nil { + var middlewares []TraefikMiddleware + if spec.Traefik.Middlewares != nil { + for _, middleware := range spec.Traefik.Middlewares { + middlewares = append(middlewares, TraefikMiddleware{ + Name: middleware.Name, + Namespace: middleware.Namespace, + }) + } + } + traefik = Traefik{ + Entrypoints: spec.Traefik.Entrypoints, + Middlewares: middlewares, + Hosts: spec.Traefik.Hosts, + } + } + + adminTraefik := Traefik{} + if spec.AdminTraefik != nil { + var middlewares []TraefikMiddleware + if spec.AdminTraefik.Middlewares != nil { + for _, middleware := range spec.AdminTraefik.Middlewares { + middlewares = append(middlewares, TraefikMiddleware{ + Name: middleware.Name, + Namespace: middleware.Namespace, + }) + } + } + adminTraefik = Traefik{ + Entrypoints: spec.AdminTraefik.Entrypoints, + Middlewares: middlewares, + Hosts: spec.AdminTraefik.Hosts, + } + } + var adminIstio Istio if spec.AdminIstio != nil { gateway := spec.AdminIstio.IngressGateway @@ -1121,18 +1201,16 @@ func getConfig( } fabricOrdChart := fabricOrdChart{ - Affinity: spec.Affinity, - NodeSelector: spec.NodeSelector, - ImagePullSecrets: spec.ImagePullSecrets, - EnvVars: spec.Env, - Resources: spec.Resources, - Istio: istio, - AdminIstio: adminIstio, + PodLabels: spec.PodLabels, + PodAnnotations: spec.PodAnnotations, GatewayApi: gatewayApi, + Istio: istio, + Traefik: traefik, AdminGatewayApi: adminGatewayApi, + AdminIstio: adminIstio, + AdminTraefik: adminTraefik, Replicas: spec.Replicas, Genesis: spec.Genesis, - Proxy: proxy, ChannelParticipationEnabled: spec.ChannelParticipationEnabled, BootstrapMethod: string(spec.BootstrapMethod), Admin: admin{ @@ -1141,16 +1219,19 @@ func getConfig( RootCAs: string(adminRootCRTEncoded), ClientRootCAs: string(adminClientRootCRTEncoded), }, - Cacert: string(signRootCRTEncoded), - Tlsrootcert: string(tlsRootCRTEncoded), - AdminCert: "", - Cert: string(signCRTEncoded), - Key: string(signEncodedPK), - Tolerations: spec.Tolerations, + Cacert: string(signRootCRTEncoded), + NodeSelector: spec.NodeSelector, + Tlsrootcert: string(tlsRootCRTEncoded), + AdminCert: "", + Affinity: spec.Affinity, + Cert: string(signCRTEncoded), + Key: string(signEncodedPK), TLS: tls{ Cert: string(tlsCRTEncoded), Key: string(tlsEncodedPK), }, + Tolerations: spec.Tolerations, + Resources: spec.Resources, FullnameOverride: conf.Name, HostAliases: hostAliases, Service: service{ @@ -1184,10 +1265,13 @@ func getConfig( }, }, }, - Clientcerts: clientcerts{}, - Hosts: ingressHosts, - Logging: Logging{Spec: "info"}, - ServiceMonitor: monitor, + Clientcerts: clientcerts{}, + Hosts: ingressHosts, + Logging: Logging{Spec: "info"}, + ServiceMonitor: monitor, + EnvVars: spec.Env, + ImagePullSecrets: spec.ImagePullSecrets, + Proxy: proxy, } return &fabricOrdChart, nil diff --git a/controllers/ordnode/types.go b/controllers/ordnode/types.go index af83b8e8..b7c32501 100644 --- a/controllers/ordnode/types.go +++ b/controllers/ordnode/types.go @@ -3,10 +3,14 @@ package ordnode import corev1 "k8s.io/api/core/v1" type fabricOrdChart struct { + PodLabels map[string]string `json:"podLabels"` + PodAnnotations map[string]string `json:"podAnnotations"` GatewayApi GatewayApi `json:"gatewayApi"` Istio Istio `json:"istio"` + Traefik Traefik `json:"traefik"` AdminGatewayApi GatewayApi `json:"adminGatewayApi"` 
AdminIstio Istio `json:"adminIstio"` + AdminTraefik Traefik `json:"adminTraefik"` Replicas int `json:"replicas"` Genesis string `json:"genesis"` ChannelParticipationEnabled bool `json:"channelParticipationEnabled"` @@ -49,7 +53,15 @@ type GRPCProxy struct { // +nullable Resources *corev1.ResourceRequirements `json:"resources"` } - +type TraefikMiddleware struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} +type Traefik struct { + Entrypoints []string `json:"entryPoints"` + Middlewares []TraefikMiddleware `json:"middlewares"` + Hosts []string `json:"hosts"` +} type ServiceMonitor struct { Enabled bool `json:"enabled"` Labels map[string]string `json:"labels"` diff --git a/controllers/peer/peer_controller.go b/controllers/peer/peer_controller.go index 0cab9cc2..c7779f95 100644 --- a/controllers/peer/peer_controller.go +++ b/controllers/peer/peer_controller.go @@ -8,6 +8,7 @@ import ( "encoding/json" "encoding/pem" "fmt" + "helm.sh/helm/v3/pkg/release" "os" "reflect" "strings" @@ -58,6 +59,9 @@ type FabricPeerReconciler struct { Config *rest.Config AutoRenewCertificates bool AutoRenewCertificatesDelta time.Duration + Wait bool + Timeout time.Duration + MaxHistory int } func (r *FabricPeerReconciler) addFinalizer(reqLogger logr.Logger, m *hlfv1alpha1.FabricPeer) error { @@ -356,14 +360,23 @@ func (r *FabricPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) cmdStatus := action.NewStatus(cfg) exists := true - _, err = cmdStatus.Run(releaseName) + helmStatus, err := cmdStatus.Run(releaseName) if err != nil { if errors.Is(err, driver.ErrReleaseNotFound) { + // it doesn't exists exists = false } else { - // it doesnt exist - r.setConditionStatus(ctx, fabricPeer, hlfv1alpha1.FailedStatus, false, err, false) - return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricPeer) + // it doesn't exist + return ctrl.Result{}, err + } + } + if exists && helmStatus.Info.Status == release.StatusPendingUpgrade { + rollbackStatus := action.NewRollback(cfg) + rollbackStatus.Version = helmStatus.Version - 1 + err = rollbackStatus.Run(releaseName) + if err != nil { + // it doesn't exist + return ctrl.Result{}, err } } log.Debugf("Release %s exists=%v", releaseName, exists) @@ -497,6 +510,8 @@ func (r *FabricPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) } } else { cmd := action.NewInstall(cfg) + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout name, chart, err := cmd.NameAndChart([]string{releaseName, r.ChartPath}) if err != nil { r.setConditionStatus(ctx, fabricPeer, hlfv1alpha1.FailedStatus, false, err, false) @@ -539,6 +554,11 @@ func (r *FabricPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricPeer) } log.Infof("Chart installed %s", release.Name) + err = r.Get(ctx, req.NamespacedName, fabricPeer) + if err != nil { + r.setConditionStatus(ctx, fabricPeer, hlfv1alpha1.FailedStatus, false, err, false) + return r.updateCRStatusOrFailReconcile(ctx, r.Log, fabricPeer) + } fabricPeer.Status.Status = hlfv1alpha1.PendingStatus fabricPeer.Status.Conditions.SetCondition(status.Condition{ Type: "DEPLOYED", @@ -604,7 +624,6 @@ func (r *FabricPeerReconciler) upgradeChart( return err } cmd := action.NewUpgrade(cfg) - cmd.MaxHistory = 5 err = os.Setenv("HELM_NAMESPACE", ns) if err != nil { return err @@ -615,8 +634,9 @@ func (r *FabricPeerReconciler) upgradeChart( if err != nil { return err } - cmd.Wait = true - cmd.Timeout = time.Minute * 5 + cmd.Wait = r.Wait + cmd.MaxHistory = r.MaxHistory + cmd.Timeout = 
r.Timeout log.Infof("Upgrading chart %s", inrec) release, err := cmd.Run(releaseName, ch, inInterface) if err != nil { @@ -887,6 +907,26 @@ func ReenrollTLSCryptoMaterial( return tlsCert, tlsKey, tlsRootCert, nil } +func getCertBytesFromCATLS(client *kubernetes.Clientset, caTls hlfv1alpha1.Catls) ([]byte, error) { + var signCertBytes []byte + var err error + if caTls.Cacert != "" { + signCertBytes, err = base64.StdEncoding.DecodeString(caTls.Cacert) + if err != nil { + return nil, err + } + } else if caTls.SecretRef != nil { + secret, err := client.CoreV1().Secrets(caTls.SecretRef.Namespace).Get(context.Background(), caTls.SecretRef.Name, v1.GetOptions{}) + if err != nil { + return nil, err + } + signCertBytes = secret.Data[caTls.SecretRef.Key] + } else { + return nil, errors.New("invalid ca tls") + } + return signCertBytes, nil +} + func GetConfig( conf *hlfv1alpha1.FabricPeer, client *kubernetes.Clientset, @@ -924,7 +964,7 @@ func GetConfig( return nil, errors.Wrapf(err, "failed to parse tls private key") } } else if refreshCerts { - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode tls ca cert") } @@ -967,7 +1007,7 @@ func GetConfig( } else { tlsCert, tlsKey, tlsRootCert, err = getExistingTLSCrypto(client, chartName, namespace) if err != nil { - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, err } @@ -986,7 +1026,7 @@ func GetConfig( } } if refreshCerts { - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, err } @@ -1005,7 +1045,7 @@ func GetConfig( } else { tlsOpsCert, tlsOpsKey, _, err = getExistingTLSOPSCrypto(client, chartName, namespace) if err != nil { - cacert, err := base64.StdEncoding.DecodeString(tlsParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, tlsParams.Catls) if err != nil { return nil, err } @@ -1043,7 +1083,7 @@ func GetConfig( return nil, errors.Wrapf(err, "failed to parse sign private key") } } else if refreshCerts { - cacert, err := base64.StdEncoding.DecodeString(signParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, signParams.Catls) if err != nil { return nil, errors.Wrapf(err, "failed to decode sign ca cert") } @@ -1086,7 +1126,7 @@ func GetConfig( } else { signCert, signKey, signRootCert, err = getExistingSignCrypto(client, chartName, namespace) if err != nil { - cacert, err := base64.StdEncoding.DecodeString(signParams.Catls.Cacert) + cacert, err := getCertBytesFromCATLS(client, signParams.Catls) if err != nil { return nil, err } @@ -1196,6 +1236,8 @@ func GetConfig( stateDb = "CouchDB" case hlfv1alpha1.StateDBLevelDB: stateDb = "goleveldb" + case hlfv1alpha1.StateDBPostgres: + stateDb = "pg" default: stateDb = "goleveldb" } @@ -1233,6 +1275,23 @@ func GetConfig( IngressGateway: "", } } + traefik := Traefik{} + if spec.Traefik != nil { + var middlewares []TraefikMiddleware + if spec.Traefik.Middlewares != nil { + for _, middleware := range spec.Traefik.Middlewares { + middlewares = append(middlewares, TraefikMiddleware{ + Name: middleware.Name, + Namespace: middleware.Namespace, + }) + } + } + traefik = Traefik{ + Entrypoints: spec.Traefik.Entrypoints, + Middlewares: middlewares, + Hosts: spec.Traefik.Hosts, + } + } var gatewayApi GatewayApi if 
spec.GatewayApi != nil { gatewayApiName := spec.GatewayApi.GatewayName @@ -1341,11 +1400,17 @@ func GetConfig( Proxy: spec.Resources.Proxy, } var c = FabricPeerChart{ - EnvVars: spec.Env, - Replicas: spec.Replicas, - ImagePullSecrets: spec.ImagePullSecrets, - GatewayApi: gatewayApi, - Istio: istio, + DeliveryClientaddressOverrides: spec.DeliveryClientaddressOverrides, + Volumes: spec.Volumes, + PeerVolumeMounts: spec.PeerVolumeMounts, + PodLabels: spec.PodLabels, + PodAnnotations: spec.PodAnnotations, + EnvVars: spec.Env, + Replicas: spec.Replicas, + ImagePullSecrets: spec.ImagePullSecrets, + GatewayApi: gatewayApi, + Istio: istio, + Traefik: traefik, Image: Image{ Repository: spec.Image, Tag: spec.Tag, @@ -1438,12 +1503,12 @@ func GetConfig( return &c, nil } -func (r *FabricPeerReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *FabricPeerReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int) error { return ctrl.NewControllerManagedBy(mgr). For(&hlfv1alpha1.FabricPeer{}). Owns(&appsv1.Deployment{}). WithOptions(controller.Options{ - MaxConcurrentReconciles: 1, + MaxConcurrentReconciles: maxConcurrentReconciles, }). Complete(r) } @@ -1476,6 +1541,8 @@ func (r *FabricPeerReconciler) finalizePeer(reqLogger logr.Logger, peer *hlfv1al releaseName := peer.Name reqLogger.Info("Successfully finalized peer") cmd := action.NewUninstall(cfg) + cmd.Wait = r.Wait + cmd.Timeout = r.Timeout resp, err := cmd.Run(releaseName) if err != nil { if strings.Compare("Release not loaded", err.Error()) != 0 { @@ -1533,61 +1600,66 @@ func createPeerService( return nil, err } } - if exists { - return svc, nil - } labels := map[string]string{ "app": chartName, "release": releaseName, } - svc = &apiv1.Service{ - ObjectMeta: v1.ObjectMeta{ - Name: svcName, - Namespace: ns, - Labels: labels, - }, - Spec: corev1.ServiceSpec{ - Type: peer.Spec.Service.Type, - Ports: []corev1.ServicePort{ - { - Name: PeerPortName, - Protocol: "TCP", - Port: 7051, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 7051, - }, + serviceSpec := corev1.ServiceSpec{ + Type: peer.Spec.Service.Type, + Ports: []corev1.ServicePort{ + { + Name: PeerPortName, + Protocol: "TCP", + Port: 7051, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 7051, }, - { - Name: ChaincodePortName, - Protocol: "TCP", - Port: 7052, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 7052, - }, + }, + { + Name: ChaincodePortName, + Protocol: "TCP", + Port: 7052, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 7052, }, - { - Name: EventPortName, - Protocol: "TCP", - Port: 7053, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 7053, - }, + }, + { + Name: EventPortName, + Protocol: "TCP", + Port: 7053, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 7053, }, - { - Name: OperationsPortName, - Protocol: "TCP", - Port: 9443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 9443, - }, + }, + { + Name: OperationsPortName, + Protocol: "TCP", + Port: 9443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 9443, }, }, - Selector: labels, }, + Selector: labels, + } + + if exists { + // update the service + svc.Spec = serviceSpec + return clientSet.CoreV1().Services(ns).Update(ctx, svc, v1.UpdateOptions{}) + } + + svc = &apiv1.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: svcName, + Namespace: ns, + Labels: labels, + }, + Spec: serviceSpec, Status: corev1.ServiceStatus{}, } return clientSet.CoreV1().Services(ns).Create(ctx, svc, 
v1.CreateOptions{}) diff --git a/controllers/peer/types.go b/controllers/peer/types.go index d41b9fe7..753a3494 100644 --- a/controllers/peer/types.go +++ b/controllers/peer/types.go @@ -1,6 +1,9 @@ package peer -import corev1 "k8s.io/api/core/v1" +import ( + v1alpha1 "github.com/kfsoftware/hlf-operator/api/hlf.kungfusoftware.es/v1alpha1" + corev1 "k8s.io/api/core/v1" +) type RBAC struct { Ns string `json:"ns"` @@ -23,42 +26,49 @@ type FSServer struct { } type FabricPeerChart struct { - FSServer FSServer `json:"fsServer"` - GatewayApi GatewayApi `json:"gatewayApi"` - Istio Istio `json:"istio"` - Replicas int `json:"replicas"` - ExternalChaincodeBuilder bool `json:"externalChaincodeBuilder"` - CouchdbUsername string `json:"couchdbUsername"` - CouchdbPassword string `json:"couchdbPassword"` - Image Image `json:"image"` - CouchDB CouchDB `json:"couchdb"` - Rbac RBAC `json:"rbac"` - DockerSocketPath string `json:"dockerSocketPath"` - Peer Peer `json:"peer"` - Cert string `json:"cert"` - Key string `json:"key"` - Hosts []string `json:"hosts"` - Proxy GRPCProxy `json:"proxy"` - TLS TLS `json:"tls"` - OPSTLS TLS `json:"opsTLS"` - Cacert string `json:"cacert"` - IntCacert string `json:"intCAcert"` - Tlsrootcert string `json:"tlsrootcert"` - Resources PeerResources `json:"resources,omitempty"` - NodeSelector *corev1.NodeSelector `json:"nodeSelector,omitempty"` - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets"` - Affinity *corev1.Affinity `json:"affinity,omitempty"` - ExternalHost string `json:"externalHost"` - FullnameOverride string `json:"fullnameOverride"` - CouchDBExporter CouchDBExporter `json:"couchdbExporter"` - HostAliases []HostAlias `json:"hostAliases"` - Service Service `json:"service"` - Persistence PeerPersistence `json:"persistence"` - Logging Logging `json:"logging"` - ExternalBuilders []ExternalBuilder `json:"externalBuilders"` - ServiceMonitor ServiceMonitor `json:"serviceMonitor"` - EnvVars []corev1.EnvVar `json:"envVars"` + Volumes []corev1.Volume `json:"volumes"` + PeerVolumeMounts []corev1.VolumeMount `json:"peerVolumeMounts"` + + PodLabels map[string]string `json:"podLabels"` + PodAnnotations map[string]string `json:"podAnnotations"` + FSServer FSServer `json:"fsServer"` + GatewayApi GatewayApi `json:"gatewayApi"` + Istio Istio `json:"istio"` + Traefik Traefik `json:"traefik"` + Replicas int `json:"replicas"` + ExternalChaincodeBuilder bool `json:"externalChaincodeBuilder"` + CouchdbUsername string `json:"couchdbUsername"` + CouchdbPassword string `json:"couchdbPassword"` + Image Image `json:"image"` + CouchDB CouchDB `json:"couchdb"` + Rbac RBAC `json:"rbac"` + DockerSocketPath string `json:"dockerSocketPath"` + Peer Peer `json:"peer"` + Cert string `json:"cert"` + Key string `json:"key"` + Hosts []string `json:"hosts"` + Proxy GRPCProxy `json:"proxy"` + TLS TLS `json:"tls"` + OPSTLS TLS `json:"opsTLS"` + Cacert string `json:"cacert"` + IntCacert string `json:"intCAcert"` + Tlsrootcert string `json:"tlsrootcert"` + Resources PeerResources `json:"resources,omitempty"` + NodeSelector *corev1.NodeSelector `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + ExternalHost string `json:"externalHost"` + FullnameOverride string `json:"fullnameOverride"` + CouchDBExporter CouchDBExporter `json:"couchdbExporter"` + 
HostAliases []HostAlias `json:"hostAliases"` + Service Service `json:"service"` + Persistence PeerPersistence `json:"persistence"` + Logging Logging `json:"logging"` + ExternalBuilders []ExternalBuilder `json:"externalBuilders"` + ServiceMonitor ServiceMonitor `json:"serviceMonitor"` + EnvVars []corev1.EnvVar `json:"envVars"` + DeliveryClientaddressOverrides []v1alpha1.AddressOverride `json:"deliveryClientaddressOverrides"` } type GatewayApi struct { Port int `json:"port"` @@ -91,7 +101,15 @@ type ExternalBuilder struct { Path string `json:"path"` PropagateEnvironment []string `json:"propagateEnvironment"` } - +type TraefikMiddleware struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} +type Traefik struct { + Entrypoints []string `json:"entryPoints"` + Middlewares []TraefikMiddleware `json:"middlewares"` + Hosts []string `json:"hosts"` +} type Istio struct { Port int `json:"port"` Hosts []string `json:"hosts"` diff --git a/controllers/tests/ca_controller_test.go b/controllers/tests/ca_controller_test.go index 55327b93..06e85532 100644 --- a/controllers/tests/ca_controller_test.go +++ b/controllers/tests/ca_controller_test.go @@ -246,7 +246,7 @@ func randomFabricCA(releaseName string, namespace string) *hlfv1alpha1.FabricCA }, CLRSizeLimit: 512000, Image: "hyperledger/fabric-ca", - Version: "1.4.9", + Version: "1.5.7", Debug: true, TLS: hlfv1alpha1.FabricCATLSConf{Subject: subject}, CA: hlfv1alpha1.FabricCAItemConf{ @@ -834,7 +834,7 @@ var _ = Describe("Fabric Controllers", func() { }, CLRSizeLimit: 512000, Image: "hyperledger/fabric-ca", - Version: "1.4.9", + Version: "1.5.7", Debug: true, TLS: hlfv1alpha1.FabricCATLSConf{Subject: subject}, CA: hlfv1alpha1.FabricCAItemConf{ diff --git a/controllers/tests/suite_test.go b/controllers/tests/suite_test.go index 4b49288d..1276465e 100644 --- a/controllers/tests/suite_test.go +++ b/controllers/tests/suite_test.go @@ -87,7 +87,7 @@ var _ = BeforeSuite(func(done Done) { ClientSet: ClientSet, ChartPath: caChartPath, } - err = caReconciler.SetupWithManager(k8sManager) + err = caReconciler.SetupWithManager(k8sManager, 10) Expect(err).ToNot(HaveOccurred()) peerChartPath, err := filepath.Abs("../../charts/hlf-peer") Expect(err).ToNot(HaveOccurred()) @@ -98,7 +98,7 @@ var _ = BeforeSuite(func(done Done) { Config: RestConfig, ChartPath: peerChartPath, } - err = peerReconciler.SetupWithManager(k8sManager) + err = peerReconciler.SetupWithManager(k8sManager, 10) Expect(err).ToNot(HaveOccurred()) ordChartPath, err := filepath.Abs("../../charts/hlf-ord") @@ -122,7 +122,7 @@ var _ = BeforeSuite(func(done Done) { ChartPath: ordNodeChartPath, Config: RestConfig, } - err = ordNodeReconciler.SetupWithManager(k8sManager) + err = ordNodeReconciler.SetupWithManager(k8sManager, 10) Expect(err).ToNot(HaveOccurred()) go func() { diff --git a/dashboards/hlf-operator.json b/dashboards/hlf-operator.json index 1fbd9228..4645a1c5 100644 --- a/dashboards/hlf-operator.json +++ b/dashboards/hlf-operator.json @@ -95,7 +95,7 @@ "targets": [ { "exemplar": true, - "expr": "hlf_operator_certificate_expiration_timestamp_seconds{exported_namespace=\"default\"} - time()", + "expr": "hlf_operator_certificate_expiration_timestamp_seconds{} - time()", "interval": "", "legendFormat": "{{exported_namespace}} / {{name}}", "refId": "A" diff --git a/go.mod b/go.mod index 499a61f8..81aea862 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/hyperledger/fabric v2.1.1+incompatible github.com/hyperledger/fabric-config v0.1.0 
github.com/hyperledger/fabric-lib-go v1.0.0 - github.com/hyperledger/fabric-protos-go v0.0.0-20211118165945-23d738fc3553 + github.com/hyperledger/fabric-protos-go v0.3.0 github.com/hyperledger/fabric-sdk-go v1.0.1-0.20220124135247-4f34271d9b0f github.com/jinzhu/copier v0.3.5 github.com/jmoiron/sqlx v1.3.5 @@ -220,7 +220,6 @@ replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible github.com/docker/docker => github.com/docker/docker v20.10.17+incompatible github.com/go-kit/kit => github.com/go-kit/kit v0.8.0 - github.com/hyperledger/fabric-protos-go => github.com/hyperledger/fabric-protos-go v0.0.0-20210911123859-041d13f0980c google.golang.org/grpc => google.golang.org/grpc v1.33.2 k8s.io/api => k8s.io/api v0.24.0 k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.0 diff --git a/go.sum b/go.sum index 4f1e3f8c..0f8a5329 100644 --- a/go.sum +++ b/go.sum @@ -475,8 +475,12 @@ github.com/hyperledger/fabric-config v0.1.0 h1:TsR3y5xEoUmXWfp8tcDycjJhVvXEHiV5k github.com/hyperledger/fabric-config v0.1.0/go.mod h1:aeDZ0moG/qKvwLjddcqYr8+58/oNaJy3HE0tI01546c= github.com/hyperledger/fabric-lib-go v1.0.0 h1:UL1w7c9LvHZUSkIvHTDGklxFv2kTeva1QI2emOVc324= github.com/hyperledger/fabric-lib-go v1.0.0/go.mod h1:H362nMlunurmHwkYqR5uHL2UDWbQdbfz74n8kbCFsqc= +github.com/hyperledger/fabric-protos-go v0.0.0-20200424173316-dd554ba3746e/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= github.com/hyperledger/fabric-protos-go v0.0.0-20210911123859-041d13f0980c h1:QPhSriw6EzMOj/d7gcGiKEvozVvQ5HLk9UWie4KAvSs= github.com/hyperledger/fabric-protos-go v0.0.0-20210911123859-041d13f0980c/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= +github.com/hyperledger/fabric-protos-go v0.0.0-20211118165945-23d738fc3553/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= +github.com/hyperledger/fabric-protos-go v0.3.0 h1:MXxy44WTMENOh5TI8+PCK2x6pMj47Go2vFRKDHB2PZs= +github.com/hyperledger/fabric-protos-go v0.3.0/go.mod h1:WWnyWP40P2roPmmvxsUXSvVI/CF6vwY1K1UFidnKBys= github.com/hyperledger/fabric-sdk-go v1.0.1-0.20220124135247-4f34271d9b0f h1:SYduJmSWX/Q8fw8tiJK/VnRS+zyLCFXMcAIAAVjzSF0= github.com/hyperledger/fabric-sdk-go v1.0.1-0.20220124135247-4f34271d9b0f/go.mod h1:JRplpKBeAvXjsBhOCCM/KvMRUbdDyhsAh80qbXzKc10= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= diff --git a/kubectl-hlf/cmd/chaincode/install.go b/kubectl-hlf/cmd/chaincode/install.go index c81f86be..63d12dc3 100644 --- a/kubectl-hlf/cmd/chaincode/install.go +++ b/kubectl-hlf/cmd/chaincode/install.go @@ -57,10 +57,7 @@ func (c *installChaincodeCmd) run() error { if err != nil { return err } - chLng, ok := pb.ChaincodeSpec_Type_value[strings.ToUpper(c.chaincodeLanguage)] - if !ok { - return errors.Errorf("Language %s not valid", c.chaincodeLanguage) - } + var pkg []byte if strings.HasSuffix(c.chaincodePath, ".tar.gz") || strings.HasSuffix(c.chaincodePath, ".tgz") { pkg, err = ioutil.ReadFile(c.chaincodePath) @@ -68,6 +65,10 @@ func (c *installChaincodeCmd) run() error { return err } } else { + chLng, ok := pb.ChaincodeSpec_Type_value[strings.ToUpper(c.chaincodeLanguage)] + if !ok { + return errors.Errorf("Language %s not valid", c.chaincodeLanguage) + } pkg, err = lifecycle.NewCCPackage(&lifecycle.Descriptor{ Path: c.chaincodePath, Type: pb.ChaincodeSpec_Type(chLng), diff --git a/kubectl-hlf/cmd/chaincode/invoke.go b/kubectl-hlf/cmd/chaincode/invoke.go index 5b42cd0d..89489563 100644 --- a/kubectl-hlf/cmd/chaincode/invoke.go +++ 
b/kubectl-hlf/cmd/chaincode/invoke.go @@ -6,6 +6,7 @@ import ( "io" "github.com/hyperledger/fabric-sdk-go/pkg/client/channel" + "github.com/hyperledger/fabric-sdk-go/pkg/common/logging" "github.com/hyperledger/fabric-sdk-go/pkg/core/config" "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" "github.com/kfsoftware/hlf-operator/kubectl-hlf/cmd/helpers" @@ -88,6 +89,7 @@ func (c *invokeChaincodeCmd) run(out io.Writer) error { return nil } func newInvokeChaincodeCMD(out io.Writer, errOut io.Writer) *cobra.Command { + logging.Initialize(helpers.HLFLoggerProvider{}) c := &invokeChaincodeCmd{} cmd := &cobra.Command{ Use: "invoke", diff --git a/kubectl-hlf/cmd/chaincode/query.go b/kubectl-hlf/cmd/chaincode/query.go index cf0e05af..53357efa 100644 --- a/kubectl-hlf/cmd/chaincode/query.go +++ b/kubectl-hlf/cmd/chaincode/query.go @@ -87,6 +87,7 @@ func (c *queryChaincodeCmd) run(out io.Writer) error { } return nil } + func newQueryChaincodeCMD(out io.Writer, errOut io.Writer) *cobra.Command { c := &queryChaincodeCmd{} cmd := &cobra.Command{ diff --git a/kubectl-hlf/cmd/channel/channel.go b/kubectl-hlf/cmd/channel/channel.go index bd4826b5..7cadc5ac 100644 --- a/kubectl-hlf/cmd/channel/channel.go +++ b/kubectl-hlf/cmd/channel/channel.go @@ -1,9 +1,10 @@ package channel import ( + "io" + "github.com/kfsoftware/hlf-operator/kubectl-hlf/cmd/channel/consenter" "github.com/kfsoftware/hlf-operator/kubectl-hlf/cmd/channel/ordorg" - "io" "github.com/spf13/cobra" ) @@ -21,6 +22,7 @@ func NewChannelCmd(stdOut io.Writer, stdErr io.Writer) *cobra.Command { newGenerateChannelCMD(stdOut, stdErr), newInspectChannelCMD(stdOut, stdErr), newTopChannelCMD(stdOut, stdErr), + newSignUpdateChannelCMD(stdOut, stdErr), newAddOrgToChannelCMD(stdOut, stdErr), ordorg.NewOrdOrgCmd(stdOut, stdErr), consenter.NewConsenterCmd(stdOut, stdErr), diff --git a/kubectl-hlf/cmd/channel/signupdate.go b/kubectl-hlf/cmd/channel/signupdate.go index 39cb5112..87882530 100644 --- a/kubectl-hlf/cmd/channel/signupdate.go +++ b/kubectl-hlf/cmd/channel/signupdate.go @@ -2,15 +2,21 @@ package channel import ( "bytes" + "fmt" + "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric-protos-go/common" - mspclient "github.com/hyperledger/fabric-sdk-go/pkg/client/msp" "github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt" + "github.com/hyperledger/fabric-sdk-go/pkg/common/providers/msp" "github.com/hyperledger/fabric-sdk-go/pkg/core/config" + "github.com/hyperledger/fabric-sdk-go/pkg/core/cryptosuite" + "github.com/hyperledger/fabric-sdk-go/pkg/core/cryptosuite/bccsp/sw" + "github.com/hyperledger/fabric-sdk-go/pkg/fab" "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" - log "github.com/sirupsen/logrus" + mspimpl "github.com/hyperledger/fabric-sdk-go/pkg/msp" "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + "io" "io/ioutil" ) @@ -20,15 +26,25 @@ type signUpdateChannelCmd struct { channelName string userName string file string - output string mspID string + signatures []string + identity string + output string } func (c *signUpdateChannelCmd) validate() error { return nil } -func (c *signUpdateChannelCmd) run() error { +type identity struct { + Cert Pem `json:"cert"` + Key Pem `json:"key"` +} +type Pem struct { + Pem string +} + +func (c *signUpdateChannelCmd) run(out io.Writer) error { configBackend := config.FromFile(c.configPath) sdk, err := fabsdk.New(configBackend) if err != nil { @@ -46,36 +62,63 @@ func (c *signUpdateChannelCmd) run() error { if err != nil { return err } - envelope := &common.Envelope{} - err = 
proto.Unmarshal(updateEnvelopeBytes, envelope) + configUpdateReader := bytes.NewReader(updateEnvelopeBytes) + sdkConfig, err := sdk.Config() if err != nil { return err } - configUpdateReader := bytes.NewReader(updateEnvelopeBytes) - mspClient, err := mspclient.New(org1AdminClientContext, mspclient.WithOrg(c.mspID)) + cryptoConfig := cryptosuite.ConfigFromBackend(sdkConfig) + cryptoSuite, err := sw.GetSuiteByConfig(cryptoConfig) if err != nil { return err } - usr, err := mspClient.GetSigningIdentity(c.userName) + userStore := mspimpl.NewMemoryUserStore() + endpointConfig, err := fab.ConfigFromBackend(sdkConfig) if err != nil { return err } - signature, err := resClient.CreateConfigSignatureFromReader(usr, configUpdateReader) + identityManager, err := mspimpl.NewIdentityManager(c.mspID, userStore, cryptoSuite, endpointConfig) if err != nil { return err } - signatureBytes, err := proto.Marshal(signature) + identityBytes, err := ioutil.ReadFile(c.identity) + if err != nil { + return err + } + id := &identity{} + err = yaml.Unmarshal(identityBytes, id) + if err != nil { + return err + } + signingIdentity, err := identityManager.CreateSigningIdentity( + msp.WithPrivateKey([]byte(id.Key.Pem)), + msp.WithCert([]byte(id.Cert.Pem)), + ) + if err != nil { + return err + } + signature, err := resClient.CreateConfigSignatureFromReader(signingIdentity, configUpdateReader) if err != nil { return err } - err = ioutil.WriteFile(c.output, signatureBytes, 0777) + signatureBytes, err := proto.Marshal(signature) if err != nil { return err } - log.Infof("channel signed output: %s", c.output) + if c.output != "" { + err = ioutil.WriteFile(c.output, signatureBytes, 0644) + if err != nil { + return err + } + } else { + _, err = fmt.Fprint(out, signatureBytes) + if err != nil { + return err + } + } return nil } -func newSignUpdateChannelCMD(io.Writer, io.Writer) *cobra.Command { +func newSignUpdateChannelCMD(stdOut io.Writer, stdErr io.Writer) *cobra.Command { c := &signUpdateChannelCmd{} cmd := &cobra.Command{ Use: "signupdate", @@ -83,21 +126,21 @@ func newSignUpdateChannelCMD(io.Writer, io.Writer) *cobra.Command { if err := c.validate(); err != nil { return err } - return c.run() + return c.run(stdOut) }, } persistentFlags := cmd.PersistentFlags() persistentFlags.StringVarP(&c.mspID, "mspid", "", "", "MSP ID of the organization") persistentFlags.StringVarP(&c.channelName, "channel", "", "", "Channel name") persistentFlags.StringVarP(&c.configPath, "config", "", "", "Configuration file for the SDK") - persistentFlags.StringVarP(&c.userName, "user", "", "", "User name for the transaction") + persistentFlags.StringVarP(&c.identity, "identity", "", "", "Identity file") persistentFlags.StringVarP(&c.file, "file", "f", "", "Config update file") - persistentFlags.StringVarP(&c.output, "output", "o", "", "Output signature file") + persistentFlags.StringVarP(&c.output, "output", "o", "", "Output signature") + persistentFlags.StringVarP(&c.userName, "user", "", "", "User name for the transaction") cmd.MarkPersistentFlagRequired("mspid") cmd.MarkPersistentFlagRequired("channel") cmd.MarkPersistentFlagRequired("config") cmd.MarkPersistentFlagRequired("user") cmd.MarkPersistentFlagRequired("file") - cmd.MarkPersistentFlagRequired("output") return cmd } diff --git a/kubectl-hlf/cmd/channelcrd/mainchannel/create.go b/kubectl-hlf/cmd/channelcrd/mainchannel/create.go index c5024524..2dfd7948 100644 --- a/kubectl-hlf/cmd/channelcrd/mainchannel/create.go +++ b/kubectl-hlf/cmd/channelcrd/mainchannel/create.go @@ -124,12 +124,15 @@ 
func (o Options) mapToFabricMainChannel() (*v1alpha1.FabricMainChannelSpec, erro tlsCACert := node.Status.TlsCACert signCACert := node.Status.SignCACert ordererNodes := []v1alpha1.FabricMainChannelExternalOrdererNode{} + channelOrdererNodes := []v1alpha1.FabricMainChannelOrdererNode{} for _, ordererNode := range nodes { - adminOrdererHost := ordererNode.Name - adminOrdererPort := 7053 - ordererNodes = append(ordererNodes, v1alpha1.FabricMainChannelExternalOrdererNode{ - Host: adminOrdererHost, - AdminPort: adminOrdererPort, + namespace := ordererNode.Item.Namespace + if namespace == "" { + namespace = "default" + } + channelOrdererNodes = append(channelOrdererNodes, v1alpha1.FabricMainChannelOrdererNode{ + Name: ordererNode.Item.Name, + Namespace: namespace, }) } ordererOrganizations = append(ordererOrganizations, v1alpha1.FabricMainChannelOrdererOrganization{ @@ -137,7 +140,7 @@ func (o Options) mapToFabricMainChannel() (*v1alpha1.FabricMainChannelSpec, erro TLSCACert: tlsCACert, SignCACert: signCACert, OrdererEndpoints: ordererEndpoints, - OrderersToJoin: []v1alpha1.FabricMainChannelOrdererNode{}, + OrderersToJoin: channelOrdererNodes, ExternalOrderersToJoin: ordererNodes, }) } diff --git a/kubectl-hlf/cmd/helpers/constants.go b/kubectl-hlf/cmd/helpers/constants.go index e997c044..1b63b1a8 100644 --- a/kubectl-hlf/cmd/helpers/constants.go +++ b/kubectl-hlf/cmd/helpers/constants.go @@ -8,10 +8,10 @@ const ( DefaultStorageclass = "" DefaultCAImage = "hyperledger/fabric-ca" - DefaultCAVersion = "1.4.9" + DefaultCAVersion = "1.5.7" DefaultPeerImage = "hyperledger/fabric-peer" - DefaultPeerVersion = "2.4.9" + DefaultPeerVersion = "2.5.5" DefaultOperationsConsoleImage = "ghcr.io/hyperledger-labs/fabric-console" DefaultOperationsConsoleVersion = "latest" @@ -29,5 +29,5 @@ const ( DefaultCouchDBVersion = "3.1.1" DefaultOrdererImage = "hyperledger/fabric-orderer" - DefaultOrdererVersion = "amd64-2.4.9" + DefaultOrdererVersion = "2.5.5" ) diff --git a/kubectl-hlf/cmd/helpers/hlf.go b/kubectl-hlf/cmd/helpers/hlf.go index ba75eca0..584c2e3c 100644 --- a/kubectl-hlf/cmd/helpers/hlf.go +++ b/kubectl-hlf/cmd/helpers/hlf.go @@ -3,9 +3,10 @@ package helpers import ( "context" "fmt" + "strings" + "github.com/kfsoftware/hlf-operator/controllers/utils" "k8s.io/client-go/kubernetes" - "strings" hlfv1alpha1 "github.com/kfsoftware/hlf-operator/api/hlf.kungfusoftware.es/v1alpha1" operatorv1 "github.com/kfsoftware/hlf-operator/pkg/client/clientset/versioned" @@ -315,13 +316,15 @@ func GetCertAuthByURL(clientSet *kubernetes.Clientset, oclient *operatorv1.Clien func GetURLForCA(certAuth *ClusterCA) (string, error) { var host string var port int - if len(certAuth.Spec.Istio.Hosts) > 0 { + if certAuth.Spec.Istio != nil && len(certAuth.Spec.Istio.Hosts) > 0 { host = certAuth.Spec.Istio.Hosts[0] port = certAuth.Spec.Istio.Port - } else if len(certAuth.Spec.GatewayApi.Hosts) > 0 { + } else if certAuth.Spec.GatewayApi != nil && len(certAuth.Spec.GatewayApi.Hosts) > 0 { host = certAuth.Spec.GatewayApi.Hosts[0] port = certAuth.Spec.GatewayApi.Port - + } else if certAuth.Spec.Traefik != nil && len(certAuth.Spec.Traefik.Hosts) > 0 { + host = certAuth.Spec.Traefik.Hosts[0] + port = 443 } else { client, err := GetKubeClient() if err != nil { @@ -389,14 +392,21 @@ func GetOrdererPublicURL(clientset *kubernetes.Clientset, node hlfv1alpha1.Fabri return fmt.Sprintf("%s:%d", hostPort.Host, hostPort.Port), nil } func GetOrdererHostAndPort(clientset *kubernetes.Clientset, nodeSpec hlfv1alpha1.FabricOrdererNodeSpec, nodeStatus 
hlfv1alpha1.FabricOrdererNodeStatus) (string, int, error) { - hostName, err := utils.GetPublicIPKubernetes(clientset) - if err != nil { - return "", 0, err - } - ordererPort := nodeStatus.NodePort - if len(nodeSpec.Istio.Hosts) > 0 { + var hostName string + var err error + var ordererPort int + if nodeSpec.Istio != nil && len(nodeSpec.Istio.Hosts) > 0 { hostName = nodeSpec.Istio.Hosts[0] ordererPort = nodeSpec.Istio.Port + } else if nodeSpec.Traefik != nil && len(nodeSpec.Traefik.Hosts) > 0 { + hostName = nodeSpec.Traefik.Hosts[0] + ordererPort = 443 + } else { + hostName, err = utils.GetPublicIPKubernetes(clientset) + if err != nil { + return "", 0, err + } + ordererPort = nodeStatus.NodePort } return hostName, ordererPort, nil } @@ -413,14 +423,21 @@ func GetPeerHostAndPort(clientset *kubernetes.Clientset, nodeSpec hlfv1alpha1.Fa return hostName, ordererPort, nil } func GetOrdererAdminHostAndPort(clientset *kubernetes.Clientset, nodeSpec hlfv1alpha1.FabricOrdererNodeSpec, nodeStatus hlfv1alpha1.FabricOrdererNodeStatus) (string, int, error) { - hostName, err := utils.GetPublicIPKubernetes(clientset) - if err != nil { - return "", 0, err - } - ordererPort := nodeStatus.AdminPort - if len(nodeSpec.AdminIstio.Hosts) > 0 { + var hostName string + var err error + var ordererPort int + if nodeSpec.AdminIstio != nil && len(nodeSpec.AdminIstio.Hosts) > 0 { hostName = nodeSpec.AdminIstio.Hosts[0] ordererPort = nodeSpec.AdminIstio.Port + } else if nodeSpec.AdminTraefik != nil && len(nodeSpec.AdminTraefik.Hosts) > 0 { + hostName = nodeSpec.AdminTraefik.Hosts[0] + ordererPort = 443 + } else { + hostName, err = utils.GetPublicIPKubernetes(clientset) + if err != nil { + return "", 0, err + } + ordererPort = nodeStatus.AdminPort } return hostName, ordererPort, nil } @@ -447,22 +464,29 @@ func GetPeerPublicURL(clientset *kubernetes.Clientset, node hlfv1alpha1.FabricPe return fmt.Sprintf("%s:%d", hostPort.Host, hostPort.Port), nil } func GetPeerHostPort(clientset *kubernetes.Clientset, node hlfv1alpha1.FabricPeer) (*HostPort, error) { - k8sIP, err := utils.GetPublicIPKubernetes(clientset) - if err != nil { - return nil, err - } + if node.Spec.Istio != nil && len(node.Spec.Istio.Hosts) > 0 { return &HostPort{ Host: node.Spec.Istio.Hosts[0], Port: node.Spec.Istio.Port, }, nil } + if node.Spec.Traefik != nil && len(node.Spec.Traefik.Hosts) > 0 { + return &HostPort{ + Host: node.Spec.Traefik.Hosts[0], + Port: 443, + }, nil + } if node.Spec.GatewayApi != nil && len(node.Spec.GatewayApi.Hosts) > 0 { return &HostPort{ Host: node.Spec.GatewayApi.Hosts[0], Port: node.Spec.GatewayApi.Port, }, nil } + k8sIP, err := utils.GetPublicIPKubernetes(clientset) + if err != nil { + return nil, err + } return &HostPort{ Host: k8sIP, Port: node.Status.NodePort, @@ -472,22 +496,29 @@ func GetPeerPrivateURL(node hlfv1alpha1.FabricPeer) string { return fmt.Sprintf("%s.%s:%s", node.Name, node.Namespace, "7051") } func GetOrdererHostPort(clientset *kubernetes.Clientset, node hlfv1alpha1.FabricOrdererNode) (*HostPort, error) { - k8sIP, err := utils.GetPublicIPKubernetes(clientset) - if err != nil { - return nil, err - } + if node.Spec.Istio != nil && len(node.Spec.Istio.Hosts) > 0 { return &HostPort{ Host: node.Spec.Istio.Hosts[0], Port: node.Spec.Istio.Port, }, nil } + if node.Spec.Traefik != nil && len(node.Spec.Traefik.Hosts) > 0 { + return &HostPort{ + Host: node.Spec.Traefik.Hosts[0], + Port: 443, + }, nil + } if node.Spec.GatewayApi != nil && len(node.Spec.GatewayApi.Hosts) > 0 { return &HostPort{ Host: 
node.Spec.GatewayApi.Hosts[0], Port: node.Spec.GatewayApi.Port, }, nil } + k8sIP, err := utils.GetPublicIPKubernetes(clientset) + if err != nil { + return nil, err + } return &HostPort{ Host: k8sIP, Port: node.Status.NodePort, @@ -495,10 +526,6 @@ func GetOrdererHostPort(clientset *kubernetes.Clientset, node hlfv1alpha1.Fabric } func GetCAHostPort(clientset *kubernetes.Clientset, node hlfv1alpha1.FabricCA) (*HostPort, error) { - k8sIP, err := utils.GetPublicIPKubernetes(clientset) - if err != nil { - return nil, err - } if node.Spec.Istio != nil && len(node.Spec.Istio.Hosts) > 0 { return &HostPort{ Host: node.Spec.Istio.Hosts[0], @@ -511,6 +538,16 @@ func GetCAHostPort(clientset *kubernetes.Clientset, node hlfv1alpha1.FabricCA) ( Port: node.Spec.GatewayApi.Port, }, nil } + if node.Spec.Traefik != nil && len(node.Spec.Traefik.Hosts) > 0 { + return &HostPort{ + Host: node.Spec.Traefik.Hosts[0], + Port: 443, + }, nil + } + k8sIP, err := utils.GetPublicIPKubernetes(clientset) + if err != nil { + return nil, err + } return &HostPort{ Host: k8sIP, Port: node.Status.NodePort, diff --git a/kubectl-hlf/cmd/helpers/hlfLogger.go b/kubectl-hlf/cmd/helpers/hlfLogger.go new file mode 100644 index 00000000..33dc1033 --- /dev/null +++ b/kubectl-hlf/cmd/helpers/hlfLogger.go @@ -0,0 +1,96 @@ +package helpers + +import "github.com/hyperledger/fabric-sdk-go/pkg/core/logging/api" + +type hlfLogger struct { +} + +func (f hlfLogger) Fatal(v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Fatalf(format string, v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Fatalln(v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Panic(v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Panicf(format string, v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Panicln(v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Print(v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Printf(format string, v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Println(v ...interface{}) { + // do nothing +} + +func (f hlfLogger) Debug(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Debugf(format string, args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Debugln(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Info(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Infof(format string, args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Infoln(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Warn(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Warnf(format string, args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Warnln(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Error(args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Errorf(format string, args ...interface{}) { + // do nothing +} + +func (f hlfLogger) Errorln(args ...interface{}) { + // do nothing +} + +type HLFLoggerProvider struct{} + +func (f HLFLoggerProvider) GetLogger(module string) api.Logger { + return hlfLogger{} +} diff --git a/kubectl-hlf/cmd/identity/create.go b/kubectl-hlf/cmd/identity/create.go index 4cfb04cb..2afd021b 100644 --- a/kubectl-hlf/cmd/identity/create.go +++ b/kubectl-hlf/cmd/identity/create.go @@ -11,14 +11,17 @@ import ( ) type createIdentityCmd struct { - name string - namespace string - caName string - caNamespace string - ca string - mspID string - enrollId string - enrollSecret string + name string + namespace string + caName string + 
caNamespace string + ca string + mspID string + enrollId string + enrollSecret string + caEnrollId string + caEnrollSecret string + caType string } func (c *createIdentityCmd) validate() error { @@ -78,6 +81,16 @@ func (c *createIdentityCmd) run() error { Enrollsecret: c.enrollSecret, MSPID: c.mspID, } + if c.caEnrollId != "" && c.caEnrollSecret != "" { + fabricIdentitySpec.Register = &v1alpha1.FabricIdentityRegister{ + Enrollid: c.caEnrollId, + Enrollsecret: c.caEnrollSecret, + Type: c.caType, + Affiliation: "", + MaxEnrollments: -1, + Attrs: []string{}, + } + } fabricIdentity := &v1alpha1.FabricIdentity{ ObjectMeta: v1.ObjectMeta{ Name: c.name, @@ -121,5 +134,8 @@ func newIdentityCreateCMD() *cobra.Command { f.StringVar(&c.mspID, "mspid", "", "MSP ID") f.StringVar(&c.enrollId, "enroll-id", "", "Enroll ID") f.StringVar(&c.enrollSecret, "enroll-secret", "", "Enroll Secret") + f.StringVar(&c.caEnrollId, "ca-enroll-id", "", "CA Enroll ID to register the user") + f.StringVar(&c.caEnrollSecret, "ca-enroll-secret", "", "CA Enroll Secret to register the user") + f.StringVar(&c.caType, "ca-type", "", "Type of the user to be registered in the CA") return cmd } diff --git a/kubectl-hlf/cmd/inspect/inspect.go b/kubectl-hlf/cmd/inspect/inspect.go index 93a30942..5a5f5d5d 100644 --- a/kubectl-hlf/cmd/inspect/inspect.go +++ b/kubectl-hlf/cmd/inspect/inspect.go @@ -137,7 +137,7 @@ certificateAuthorities: {{if $ca.EnrollID }} registrar: enrollId: {{ $ca.EnrollID }} - enrollSecret: {{ $ca.EnrollPWD }} + enrollSecret: "{{ $ca.EnrollPWD }}" {{ end }} caName: ca tlsCACerts: diff --git a/kubectl-hlf/cmd/networkconfig/create.go b/kubectl-hlf/cmd/networkconfig/create.go index dd07f28a..50531305 100644 --- a/kubectl-hlf/cmd/networkconfig/create.go +++ b/kubectl-hlf/cmd/networkconfig/create.go @@ -21,6 +21,7 @@ type CreateOptions struct { Name string Internal bool SecretName string + Channels []string } func (o CreateOptions) Validate() error { @@ -79,12 +80,15 @@ func (c *createCmd) run(args []string) error { Namespace: c.opts.NS, }, Spec: hlfv1alpha1.FabricNetworkConfigSpec{ - Organization: "", - Internal: c.opts.Internal, - Organizations: c.opts.Orgs, - Namespaces: namespaces, - SecretName: secretName, - Identities: identities, + Organization: "", + Internal: c.opts.Internal, + Organizations: c.opts.Orgs, + Namespaces: namespaces, + Channels: c.opts.Channels, + Identities: identities, + ExternalOrderers: []hlfv1alpha1.FabricNetworkConfigExternalOrderer{}, + ExternalPeers: []hlfv1alpha1.FabricNetworkConfigExternalPeer{}, + SecretName: secretName, }, } _, err = oclient.HlfV1alpha1().FabricNetworkConfigs(c.opts.NS).Create( @@ -114,6 +118,7 @@ func newCreateNetworkConfigCmd(out io.Writer, errOut io.Writer) *cobra.Command { } f := cmd.Flags() f.StringSliceVarP(&c.opts.Orgs, "orgs", "o", []string{}, "Organizations to inspect") + f.StringSliceVarP(&c.opts.Channels, "channels", "c", []string{}, "Channels to inspect") f.StringVar(&c.opts.Name, "name", "", "Name of the Network Config to create") f.StringVar(&c.opts.SecretName, "secret", "", "Secret name to store the network config") f.StringVarP(&c.opts.NS, "namespace", "n", helpers.DefaultNamespace, "Namespace scope for this request") diff --git a/kubectl-hlf/cmd/ordnode/create.go b/kubectl-hlf/cmd/ordnode/create.go index d433efd5..c5008dbc 100644 --- a/kubectl-hlf/cmd/ordnode/create.go +++ b/kubectl-hlf/cmd/ordnode/create.go @@ -91,11 +91,14 @@ func (c *createCmd) run(args []string) error { gatewayApiName := c.ordererOpts.GatewayApiName gatewayApiNamespace := 
c.ordererOpts.GatewayApiNamespace gatewayApiPort := c.ordererOpts.GatewayApiPort - gatewayApi := &v1alpha1.FabricGatewayApi{ - Port: gatewayApiPort, - Hosts: []string{}, - GatewayName: gatewayApiName, - GatewayNamespace: gatewayApiNamespace, + var gatewayApi *v1alpha1.FabricGatewayApi + if c.ordererOpts.GatewayApiName != "" { + gatewayApi = &v1alpha1.FabricGatewayApi{ + Port: gatewayApiPort, + Hosts: []string{}, + GatewayName: gatewayApiName, + GatewayNamespace: gatewayApiNamespace, + } } if len(c.ordererOpts.Hosts) > 0 { istio = &v1alpha1.FabricIstio{ @@ -118,12 +121,7 @@ func (c *createCmd) run(args []string) error { Hosts: []string{}, IngressGateway: ingressGateway, } - adminGatewayApi := &v1alpha1.FabricGatewayApi{ - Port: gatewayApiPort, - Hosts: []string{}, - GatewayName: gatewayApiName, - GatewayNamespace: gatewayApiNamespace, - } + var adminGatewayApi *v1alpha1.FabricGatewayApi if len(c.ordererOpts.AdminHosts) > 0 { adminIstio = &v1alpha1.FabricIstio{ Port: ingressPort, @@ -296,9 +294,9 @@ func newCreateOrdererNodeCmd(out io.Writer, errOut io.Writer) *cobra.Command { f.StringArrayVarP(&c.ordererOpts.Hosts, "hosts", "", []string{}, "Hosts") f.StringArrayVarP(&c.ordererOpts.GatewayApiHosts, "gateway-api-hosts", "", []string{}, "Hosts for GatewayApi") f.StringArrayVarP(&c.ordererOpts.AdminGatewayApiHosts, "admin-gateway-api-hosts", "", []string{}, "GatewayAPI Hosts for the admin API") - f.StringVarP(&c.ordererOpts.GatewayApiName, "gateway-api-name", "", "hlf-gateway", "Gateway-api name") - f.StringVarP(&c.ordererOpts.GatewayApiNamespace, "gateway-api-namespace", "", "default", "Namespace of GatewayApi") - f.IntVarP(&c.ordererOpts.GatewayApiPort, "gateway-api-port", "", 443, "Gateway API port") + f.StringVarP(&c.ordererOpts.GatewayApiName, "gateway-api-name", "", "", "Gateway-api name") + f.StringVarP(&c.ordererOpts.GatewayApiNamespace, "gateway-api-namespace", "", "", "Namespace of GatewayApi") + f.IntVarP(&c.ordererOpts.GatewayApiPort, "gateway-api-port", "", 0, "Gateway API port") f.StringArrayVarP(&c.ordererOpts.AdminHosts, "admin-hosts", "", []string{}, "Hosts for the admin API(introduced in v2.3)") f.BoolVarP(&c.ordererOpts.Output, "output", "o", false, "Output in yaml") f.StringArrayVarP(&c.ordererOpts.HostAliases, "host-aliases", "", []string{}, "Host aliases (e.g.: \"1.2.3.4:osn2.example.com,peer1.example.com\")") diff --git a/kubectl-hlf/cmd/ordnode/join.go b/kubectl-hlf/cmd/ordnode/join.go index ae37a9ad..7ab1e5fd 100644 --- a/kubectl-hlf/cmd/ordnode/join.go +++ b/kubectl-hlf/cmd/ordnode/join.go @@ -99,10 +99,16 @@ func (c *joinChannelCmd) run() error { if err != nil { return err } + defer chResponse.Body.Close() log.Infof("Status code=%d", chResponse.StatusCode) if chResponse.StatusCode != 201 { - return errors.Errorf("error joining channel, got status code=%d", chResponse.StatusCode) + errorResponse := &map[string]interface{}{} + err = json.NewDecoder(chResponse.Body).Decode(errorResponse) + if err != nil { + return err + } + return errors.Errorf("error joining channel, got status code=%d %v", chResponse.StatusCode, errorResponse) } chInfo := &osnadmin.ChannelInfo{} err = json.NewDecoder(chResponse.Body).Decode(chInfo) diff --git a/kubectl-hlf/cmd/peer/create.go b/kubectl-hlf/cmd/peer/create.go index e5101199..b99dad81 100644 --- a/kubectl-hlf/cmd/peer/create.go +++ b/kubectl-hlf/cmd/peer/create.go @@ -52,6 +52,7 @@ type Options struct { CAPort int CAHost string ImagePullSecrets []string + Env []string } func (o Options) Validate() error { @@ -64,6 +65,37 @@ type createCmd 
struct { peerOpts Options } +func (c *createCmd) handleEnv() ([]corev1.EnvVar, error) { + var env []corev1.EnvVar + for _, literalSource := range c.peerOpts.Env { + keyName, value, err := ParseEnv(literalSource) + if err != nil { + return nil, err + } + env = append(env, corev1.EnvVar{ + Name: keyName, + Value: value, + }) + } + return env, nil +} + +// ParseEnv parses the source key=val pair into its component pieces. +// This functionality is distinguished from strings.SplitN(source, "=", 2) since +// it returns an error in the case of an empty key or a missing equals sign. +func ParseEnv(source string) (keyName, value string, err error) { + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid format %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) + if len(items) != 2 { + return "", "", fmt.Errorf("invalid format %v, expected key=value", source) + } + + return items[0], items[1], nil +} func (c *createCmd) validate() error { return c.peerOpts.Validate() } @@ -261,7 +293,10 @@ func (c *createCmd) run() error { }) } } - + envVars, err := c.handleEnv() + if err != nil { + return err + } fabricPeer := &v1alpha1.FabricPeer{ TypeMeta: v1.TypeMeta{ Kind: "FabricPeer", @@ -272,6 +307,7 @@ func (c *createCmd) run() error { Namespace: c.peerOpts.NS, }, Spec: v1alpha1.FabricPeerSpec{ + Env: envVars, ServiceMonitor: nil, HostAliases: hostAliases, Replicas: 1, @@ -451,6 +487,7 @@ func getPeerResourceRequirements() (*corev1.ResourceRequirements, error) { }, }, nil } + func newCreatePeerCmd(out io.Writer, errOut io.Writer) *cobra.Command { c := createCmd{out: out, errOut: errOut} cmd := &cobra.Command{ @@ -481,7 +518,7 @@ func newCreatePeerCmd(out io.Writer, errOut io.Writer) *cobra.Command { f.StringVarP(&c.peerOpts.StateDB, "statedb", "", "leveldb", "State database") f.StringVarP(&c.peerOpts.IngressGateway, "istio-ingressgateway", "", "ingressgateway", "Istio ingress gateway name") f.IntVarP(&c.peerOpts.IngressPort, "istio-port", "", 443, "Istio ingress port") - f.BoolVarP(&c.peerOpts.Leader, "leader", "", false, "Force peer to be leader") + f.BoolVarP(&c.peerOpts.Leader, "leader", "", true, "Force peer to be leader") f.StringArrayVarP(&c.peerOpts.BootstrapPeers, "bootstrap-peer", "", []string{}, "Bootstrap peers") f.StringArrayVarP(&c.peerOpts.Hosts, "hosts", "", []string{}, "External hosts") f.StringArrayVarP(&c.peerOpts.GatewayApiHosts, "gateway-api-hosts", "", []string{}, "Hosts for GatewayApi") @@ -496,5 +533,6 @@ func newCreatePeerCmd(out io.Writer, errOut io.Writer) *cobra.Command { f.StringVarP(&c.peerOpts.CouchDBImage, "couchdb-repository", "", helpers.DefaultCouchDBImage, "CouchDB image") f.StringVarP(&c.peerOpts.CouchDBTag, "couchdb-tag", "", helpers.DefaultCouchDBVersion, "CouchDB version") f.StringVarP(&c.peerOpts.CouchDBPassword, "couchdb-password", "", "", "CouchDB password") + f.StringArrayVarP(&c.peerOpts.Env, "env", "e", []string{}, "Environment variables for the peer (key=value)") return cmd } diff --git a/main.go b/main.go index 4df7a5cb..636cfb82 100644 --- a/main.go +++ b/main.go @@ -22,7 +22,6 @@ import ( "path/filepath" "time" - "github.com/amplitude/analytics-go/amplitude" "github.com/kfsoftware/hlf-operator/controllers/chaincode" "github.com/kfsoftware/hlf-operator/controllers/console" "github.com/kfsoftware/hlf-operator/controllers/followerchannel" @@ -75,6 +74,10 @@ func main() { var 
autoRenewOrdererCertificatesDelta time.Duration var autoRenewPeerCertificatesDelta time.Duration var autoRenewIdentityCertificatesDelta time.Duration + var helmChartWait bool + var helmChartTimeout time.Duration + var maxHistory int + var maxReconciles int flag.StringVar(&metricsAddr, "metrics-addr", ":8090", "The address the metric endpoint binds to.") flag.DurationVar(&autoRenewOrdererCertificatesDelta, "auto-renew-orderer-certificates-delta", 15*24*time.Hour, "The delta to renew orderer certificates before expiration. Default is 15 days.") flag.DurationVar(&autoRenewPeerCertificatesDelta, "auto-renew-peer-certificates-delta", 15*24*time.Hour, "The delta to renew peer certificates before expiration. Default is 15 days.") @@ -82,11 +85,14 @@ func main() { flag.BoolVar(&autoRenewCertificatesPeerEnabled, "auto-renew-peer-certificates", false, "Enable auto renew certificates for orderer and peer nodes. Default is false.") flag.BoolVar(&autoRenewCertificatesOrdererEnabled, "auto-renew-orderer-certificates", false, "Enable auto renew certificates for orderer and peer nodes. Default is false.") flag.BoolVar(&autoRenewCertificatesIdentityEnabled, "auto-renew-identity-certificates", true, "Enable auto renew certificates for FabricIdentity. Default is true.") + flag.IntVar(&maxReconciles, "max-reconciles", 10, "Max reconciles for a resource. Default is 10.") + flag.BoolVar(&helmChartWait, "helm-chart-wait", false, "Wait for helm chart to be deployed. Default is false.") + flag.IntVar(&maxHistory, "helm-max-history", 10, "Max history for helm chart. Default is 10.") + flag.DurationVar(&helmChartTimeout, "helm-chart-timeout", 5*time.Minute, "Timeout for helm chart to be deployed. Default is 5 minutes.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") flag.Parse() - log.Infof("Auto renew peer certificates enabled: %t", autoRenewCertificatesPeerEnabled) log.Infof("Auto renew orderer certificates enabled: %t", autoRenewCertificatesOrdererEnabled) log.Infof("Auto renew peer certificates delta: %s", autoRenewPeerCertificatesDelta) @@ -94,15 +100,6 @@ func main() { // Pass a Config struct // to initialize a Client struct // which implements Client interface - analytics := amplitude.NewClient( - amplitude.NewConfig("569cfca546698061cf130f97745afca6"), - ) - // Track events in your application - analytics.Track(amplitude.Event{ - UserID: "user-id", - EventType: "Start operator", - EventProperties: map[string]interface{}{"source": "notification"}, - }) ctrl.SetLogger(zap.New(zap.UseDevMode(true))) kubeContext, exists := os.LookupEnv("KUBECONTEXT") @@ -136,13 +133,16 @@ func main() { } if err = (&peer.FabricPeerReconciler{ Client: mgr.GetClient(), + ChartPath: peerChartPath, Log: ctrl.Log.WithName("controllers").WithName("FabricPeer"), Scheme: mgr.GetScheme(), Config: mgr.GetConfig(), - ChartPath: peerChartPath, AutoRenewCertificates: autoRenewCertificatesPeerEnabled, AutoRenewCertificatesDelta: autoRenewPeerCertificatesDelta, - }).SetupWithManager(mgr); err != nil { + Wait: helmChartWait, + Timeout: helmChartTimeout, + MaxHistory: maxHistory, + }).SetupWithManager(mgr, maxReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "FabricPeer") os.Exit(1) } @@ -157,13 +157,16 @@ func main() { os.Exit(1) } if err = (&ca.FabricCAReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("FabricCA"), - Scheme: mgr.GetScheme(), - Config: mgr.GetConfig(), - ClientSet: clientSet, - ChartPath: caChartPath, - }).SetupWithManager(mgr); err != nil { + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("FabricCA"), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + ClientSet: clientSet, + ChartPath: caChartPath, + Wait: helmChartWait, + Timeout: helmChartTimeout, + MaxHistory: maxHistory, + }).SetupWithManager(mgr, maxReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "FabricCA") os.Exit(1) } @@ -196,7 +199,10 @@ func main() { ChartPath: ordNodeChartPath, AutoRenewCertificates: autoRenewCertificatesOrdererEnabled, AutoRenewCertificatesDelta: autoRenewOrdererCertificatesDelta, - }).SetupWithManager(mgr); err != nil { + Wait: helmChartWait, + Timeout: helmChartTimeout, + MaxHistory: maxHistory, + }).SetupWithManager(mgr, maxReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "FabricOrdererNode") os.Exit(1) } diff --git a/pkg/nc/nc.go b/pkg/nc/nc.go index 24c41a18..779e81ed 100644 --- a/pkg/nc/nc.go +++ b/pkg/nc/nc.go @@ -117,7 +117,7 @@ certificateAuthorities: {{if $ca.EnrollID }} registrar: enrollId: {{ $ca.EnrollID }} - enrollSecret: {{ $ca.EnrollSecret }} + enrollSecret: "{{ $ca.EnrollSecret }}" {{ end }} caName: {{ $ca.CAName }} tlsCACerts: @@ -239,69 +239,6 @@ func GenerateNetworkConfig(channel *hlfv1alpha1.FabricMainChannel, kubeClientset } orgs = append(orgs, org) } - //for _, certAuth := range certAuths { - // tlsCACertPem := certAuth.Status.TLSCACert - // roots := x509.NewCertPool() - // ok := roots.AppendCertsFromPEM([]byte(tlsCACertPem)) - // if !ok { - // panic("failed to parse root certificate") - // } - // for mspID, org := range orgMap { - // for _, peer := range org.Peers { - // block, _ := 
pem.Decode([]byte(peer.Status.TlsCert)) - // if block == nil { - // continue - // } - // cert, err := x509.ParseCertificate(block.Bytes) - // if err != nil { - // continue - // } - // opts := x509.VerifyOptions{ - // Roots: roots, - // Intermediates: x509.NewCertPool(), - // } - // - // if _, err := cert.Verify(opts); err == nil { - // orgMap[mspID].CertAuths = append(orgMap[mspID].CertAuths, certAuth) - // } - // } - // } - // for _, ord := range ordererNodes { - // block, _ := pem.Decode([]byte(ord.Status.TlsCert)) - // if block == nil { - // continue - // } - // cert, err := x509.ParseCertificate(block.Bytes) - // if err != nil { - // continue - // } - // opts := x509.VerifyOptions{ - // Roots: roots, - // Intermediates: x509.NewCertPool(), - // } - // if _, err := cert.Verify(opts); err == nil { - // _, ok = orgMap[ord.Spec.MspID] - // if !ok { - // orgMap[ord.Spec.MspID] = &Organization{ - // Type: helpers.OrdererType, - // MspID: "", - // OrdererNodes: []*helpers.ClusterOrdererNode{}, - // Peers: []*helpers.ClusterPeer{}, - // CertAuths: []*helpers.ClusterCA{certAuth}, - // } - // } else { - // orgMap[ord.Spec.MspID].CertAuths = append(orgMap[ord.Spec.MspID].CertAuths, certAuth) - // } - // } - // } - // - //} - //for _, ord := range ordererNodes { - // orgMap[ord.Spec.MspID].OrdererNodes = append(orgMap[ord.Spec.MspID].OrdererNodes, ord) - //} - //for _, peer := range clusterPeers { - // peers = append(peers, peer) - //} err = tmpl.Execute(&buf, map[string]interface{}{ "Peers": peers, "Orderers": ordererNodes,