From b16999c8c08d20a014b04bf2465c7f0a271b5408 Mon Sep 17 00:00:00 2001
From: Shubhendu
Date: Wed, 14 Jun 2017 18:46:05 +0530
Subject: [PATCH 01/13] Added post checks to check if objects are available in etcd post create

tendrl-bug-id: Tendrl/ceph-integration#282
Signed-off-by: Shubhendu
---
 .../objects/definition/ceph.yaml                 | 37 ++++++++++++++++
 .../atoms/check_ec_profile_available/__init__.py | 42 ++++++++++++++++++
 .../atoms/check_pool_available/__init__.py       | 43 +++++++++++++++++++
 .../rbd/atoms/check_rbd_available/__init__.py    | 43 +++++++++++++++++++
 4 files changed, 165 insertions(+)
 create mode 100644 tendrl/ceph_integration/objects/ecprofile/atoms/check_ec_profile_available/__init__.py
 create mode 100644 tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
 create mode 100644 tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py

diff --git a/tendrl/ceph_integration/objects/definition/ceph.yaml b/tendrl/ceph_integration/objects/definition/ceph.yaml
index bf1edfa61..49b2497a0 100644
--- a/tendrl/ceph_integration/objects/definition/ceph.yaml
+++ b/tendrl/ceph_integration/objects/definition/ceph.yaml
@@ -21,6 +21,8 @@ namespace.ceph:
         - Pool.quota_max_bytes
       pre_run:
         - ceph.objects.Pool.atoms.NamedPoolNotExists
+      post_run:
+        - ceph.objects.Pool.atoms.CheckPoolAvailable
       run: ceph.flows.CreatePool
       type: Create
       uuid: faeab231-69e9-4c9d-b5ef-a67ed057f98b
@@ -41,6 +43,8 @@ namespace.ceph:
         - ECProfile.directory
         - ECProfile.ruleset_failure_domain
       run: ceph.flows.CreateECProfile
+      post_run:
+        - ceph.objects.ECProfile.atoms.CheckECProfileAvailable
       type: Create
       uuid: faeab231-69e9-4c9d-b5ef-a67ed057f98d
   objects:
@@ -102,6 +106,16 @@ namespace.ceph:
           run: ceph.objects.ECProfile.atoms.Delete
           type: Delete
           uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3740
+        CheckECProfileAvailable:
+          enabled: true
+          help: Check if ECProfile is available
+          inputs:
+            mandatory:
+              - ECProfile.name
+          name: ECProfile available
+          run: ceph.objects.ECProfile.atoms.CheckECProfileAvailable
+          type: Check
+          uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3759
       flows:
         DeleteECProfile:
           tags:
@@ -212,6 +226,17 @@ namespace.ceph:
           run: ceph.objects.Rbd.atoms.RbdExists
           type: Get
           uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3922
+        CheckRbdAvailable:
+          enabled: true
+          help: Check if Rbd is available
+          inputs:
+            mandatory:
+              - Rbd.pool_id
+              - Rbd.name
+          name: Rbd available
+          run: ceph.objects.Rbd.atoms.CheckRbdAvailable
+          type: Check
+          uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3929
       flows:
         CreateRbd:
           tags:
@@ -239,6 +264,8 @@ namespace.ceph:
             - Rbd.pool_quota_max_bytes
           pre_run:
             - ceph.objects.Rbd.atoms.RbdNotExists
+          post_run:
+            - ceph.objects.Rbd.atoms.CheckRbdAvailable
           run: ceph.objects.Rbd.flows.CreateRbd
           type: Create
           uuid: 9bc41d8f-a0cf-420a-b2fe-18761e07f3d2
@@ -442,6 +469,16 @@ namespace.ceph:
           run: ceph.objects.Pool.atoms.PoolNotExists
           type: Get
           uuid: 9a2df258-9b24-4fd3-a66f-ee346e2e3832
+        CheckPoolAvailable:
+          enabled: true
+          help: Check if pool is available
+          inputs:
+            mandatory:
+              - Pool.poolname
+          name: Pool available
+          run: ceph.objects.Pool.atoms.CheckPoolAvailable
+          type: Check
+          uuid: 9a2df258-9b24-4fd3-a66f-ee346e2e3839
       flows:
         DeletePool:
           tags:
diff --git a/tendrl/ceph_integration/objects/ecprofile/atoms/check_ec_profile_available/__init__.py b/tendrl/ceph_integration/objects/ecprofile/atoms/check_ec_profile_available/__init__.py
new file mode 100644
index 000000000..6e4e1cb3e
--- /dev/null
+++ b/tendrl/ceph_integration/objects/ecprofile/atoms/check_ec_profile_available/__init__.py
@@ -0,0 +1,42 @@
+import etcd
+import gevent
+
+from tendrl.commons.event import Event
+from tendrl.commons.message import Message
+from tendrl.commons import objects
+from tendrl.commons.objects import AtomExecutionFailedError
+
+
+class CheckECProfileAvailable(objects.BaseAtom):
+    def __init__(self, *args, **kwargs):
+        super(CheckECProfileAvailable, self).__init__(*args, **kwargs)
+
+    def run(self):
+        retry_count = 0
+        while True:
+            try:
+                NS.ceph.objects.ECProfile(
+                    name=self.parameters['ECProfile.name']
+                ).load()
+                return True
+            except etcd.EtcdKeyNotFound:
+                retry_count += 1
+                gevent.sleep(1)
+                if retry_count == 600:
+                    Event(
+                        Message(
+                            priority="error",
+                            publisher=NS.publisher_id,
+                            payload={
+                                "message": "ECProfile %s not reflected in tendrl yet. Timing out" %
+                                self.parameters['ECProfile.name']
+                            },
+                            job_id=self.parameters['job_id'],
+                            flow_id=self.parameters['flow_id'],
+                            cluster_id=NS.tendrl_context.integration_id,
+                        )
+                    )
+                    raise AtomExecutionFailedError(
+                        "ECProfile %s not reflected in tendrl yet. Timing out" %
+                        self.parameters['ECProfile.name']
+                    )
diff --git a/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py b/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
new file mode 100644
index 000000000..1667dae37
--- /dev/null
+++ b/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
@@ -0,0 +1,43 @@
+import gevent
+
+from tendrl.ceph_integration.objects.pool import Pool
+from tendrl.commons.event import Event
+from tendrl.commons.message import Message
+from tendrl.commons import objects
+from tendrl.commons.objects import AtomExecutionFailedError
+
+
+class CheckPoolAvailable(objects.BaseAtom):
+    def __init__(self, *args, **kwargs):
+        super(CheckPoolAvailable, self).__init__(*args, **kwargs)
+
+    def run(self):
+        retry_count = 0
+        while True:
+            pools = NS._int.client.read(
+                "clusters/%s/Pools" % NS.tendrl_context.integration_id
+            )
+            for entry in pools.leaves:
+                pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
+                if pool.pool_name == self.parameters['Pool.poolname']:
+                    return True
+            retry_count += 1
+            gevent.sleep(1)
+            if retry_count == 600:
+                Event(
+                    Message(
+                        priority="error",
+                        publisher=NS.publisher_id,
+                        payload={
+                            "message": "Pool %s not reflected in tendrl yet. Timing out" %
+                            self.parameters['Pool.poolname']
+                        },
+                        job_id=self.parameters['job_id'],
+                        flow_id=self.parameters['flow_id'],
+                        cluster_id=NS.tendrl_context.integration_id,
+                    )
+                )
+                raise AtomExecutionFailedError(
+                    "Pool %s not reflected in tendrl yet. Timing out" %
+                    self.parameters['Pool.poolname']
+                )
diff --git a/tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py b/tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py
new file mode 100644
index 000000000..d2f164dc7
--- /dev/null
+++ b/tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py
@@ -0,0 +1,43 @@
+import etcd
+import gevent
+
+from tendrl.commons.event import Event
+from tendrl.commons.message import Message
+from tendrl.commons import objects
+from tendrl.commons.objects import AtomExecutionFailedError
+
+
+class CheckRbdAvailable(objects.BaseAtom):
+    def __init__(self, *args, **kwargs):
+        super(CheckRbdAvailable, self).__init__(*args, **kwargs)
+
+    def run(self):
+        retry_count = 0
+        while True:
+            try:
+                NS.ceph.objects.Rbd(
+                    pool_id=self.parameters['Rbd.pool_id'],
+                    name=self.parameters['Rbd.name']
+                ).load()
+                return True
+            except etcd.EtcdKeyNotFound:
+                retry_count += 1
+                gevent.sleep(1)
+                if retry_count == 600:
+                    Event(
+                        Message(
+                            priority="error",
+                            publisher=NS.publisher_id,
+                            payload={
+                                "message": "Rbd %s not reflected in tendrl yet. Timing out" %
+                                self.parameters['Rbd.name']
+                            },
+                            job_id=self.parameters['job_id'],
+                            flow_id=self.parameters['flow_id'],
+                            cluster_id=NS.tendrl_context.integration_id,
+                        )
+                    )
+                    raise AtomExecutionFailedError(
+                        "Rbd %s not reflected in tendrl yet. Timing out" %
+                        self.parameters['Rbd.name']
+                    )
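
A note on the three atoms above: they share a single wait-until-synced pattern, polling etcd once a second and giving up after 600 attempts (roughly ten minutes). A minimal standalone sketch of that pattern, with wait_until_available and load_object as hypothetical stand-ins for the real NS.ceph.objects.*.load() calls:

    import etcd
    import gevent

    def wait_until_available(load_object, attempts=600):
        # Poll once per second; load_object() raises EtcdKeyNotFound
        # until the object has been synced into etcd.
        for _ in range(attempts):
            try:
                load_object()
                return True
            except etcd.EtcdKeyNotFound:
                gevent.sleep(1)  # yield to other greenlets between polls
        return False
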
From 4c368c4028d652526eaf78ee84b657e54abbc7ee Mon Sep 17 00:00:00 2001
From: Shubhendu
Date: Thu, 15 Jun 2017 17:02:12 +0530
Subject: [PATCH 02/13] Added utilization, device path and hostname fields for OSD

Also added the syncing logic for instant values of utilization

tendrl-bug-id: Tendrl/ceph-integration#288
Signed-off-by: Shubhendu
---
 .../objects/definition/ceph.yaml              | 15 +++++
 .../ceph_integration/objects/osd/__init__.py  | 62 +++++++++----------
 tendrl/ceph_integration/sds_sync/__init__.py  | 35 +++++++++++
 3 files changed, 81 insertions(+), 31 deletions(-)

diff --git a/tendrl/ceph_integration/objects/definition/ceph.yaml b/tendrl/ceph_integration/objects/definition/ceph.yaml
index bf1edfa61..942718647 100644
--- a/tendrl/ceph_integration/objects/definition/ceph.yaml
+++ b/tendrl/ceph_integration/objects/definition/ceph.yaml
@@ -565,12 +565,18 @@ namespace.ceph:
         uuid:
           help: UUID
           type: String
+        hostname:
+          help: Name of the host
+          type: String
         public_addr:
           help: Public Address
           type: String
         cluster_addr:
           help: Cluster Address
           type: String
+        device_path:
+          help: Device path
+          type: String
         heartbeat_front_addr:
           help: Heartbeat front address
           type: String
@@ -610,6 +616,15 @@ namespace.ceph:
         last_clean_end:
           help: Last clean end
           type: int
+        total:
+          help: Total size of the OSD
+          type: int
+        used:
+          help: Used size
+          type: int
+        used_pcnt:
+          help: Used percent
+          type: String
       help: "Osd"
       enabled: true
       value: clusters/$TendrlContext.integration_id/Osds/$Osd.uuid
diff --git a/tendrl/ceph_integration/objects/osd/__init__.py b/tendrl/ceph_integration/objects/osd/__init__.py
index e3dc8e340..c49cafd5d 100644
--- a/tendrl/ceph_integration/objects/osd/__init__.py
+++ b/tendrl/ceph_integration/objects/osd/__init__.py
@@ -1,39 +1,39 @@
-from tendrl.commons import objects
+from tendrl.integrations.ceph.objects import osd


-class Osd(objects.BaseObject):
+class Osd(osd.Osd):
     def __init__(self, id=None,
-                 uuid=None, public_addr=None, cluster_addr=None,
-                 heartbeat_front_addr=None, heartbeat_back_addr=None,
+                 uuid=None, hostname=None, public_addr=None, cluster_addr=None,
+                 device_path=None, heartbeat_front_addr=None, heartbeat_back_addr=None,
                  down_at=None, up_from=None, lost_at=None,
                  osd_up=None, osd_in=None, up_thru=None, weight=None,
                  primary_affinity=None, state=None, last_clean_begin=None,
-                 last_clean_end=None, *args, **kwargs):
-        super(Osd, self).__init__(*args, **kwargs)
-
-        self.id = id
-        self.uuid = uuid
-        self.public_addr = public_addr
-        self.cluster_addr = cluster_addr
-        self.heartbeat_front_addr = heartbeat_front_addr
-        self.heartbeat_back_addr = heartbeat_back_addr
-        self.down_at = down_at
-        self.up_from = up_from
-        self.lost_at = lost_at
-        self.osd_up = osd_up
-        self.osd_in = osd_in
-        self.up_thru = up_thru
-        self.weight = weight
-        self.primary_affinity = primary_affinity
-        self.state = state
-        self.last_clean_begin = last_clean_begin
-        self.last_clean_end = last_clean_end
-        self.value = 'clusters/{0}/Osds/{1}'
-
-    def render(self):
-        self.value = self.value.format(
-            NS.tendrl_context.integration_id,
-            self.uuid
-        )
-        return super(Osd, self).render()
+                 last_clean_end=None, total=None, used=None, used_pcnt=None,
+                 *args, **kwargs):
+        super(Osd, self).__init__(
+            id=id,
+            uuid=uuid,
+            hostname=hostname,
+            public_addr=public_addr,
+            cluster_addr=cluster_addr,
+            device_path=device_path,
+            heartbeat_front_addr=heartbeat_front_addr,
+            heartbeat_back_addr=heartbeat_back_addr,
+            down_at=down_at,
+            up_from=up_from,
+            lost_at=lost_at,
+            osd_up=osd_up,
+            osd_in=osd_in,
+            up_thru=up_thru,
+            weight=weight,
+            primary_affinity=primary_affinity,
+            state=state,
+            last_clean_begin=last_clean_begin,
+            last_clean_end=last_clean_end,
+            total=total,
+            used=used,
+            used_pcnt=used_pcnt,
+            *args,
+            **kwargs
+        )
diff --git a/tendrl/ceph_integration/sds_sync/__init__.py b/tendrl/ceph_integration/sds_sync/__init__.py
index f2f76f41a..4ffa22dcf 100644
--- a/tendrl/ceph_integration/sds_sync/__init__.py
+++ b/tendrl/ceph_integration/sds_sync/__init__.py
@@ -174,6 +174,37 @@ def on_heartbeat(self, cluster_data):
         # Get and update ec profiles for the cluster
         self._sync_ec_profiles()

+        # Sync the OSD utilization details
+        self._sync_osd_utilization()
+
+    def _sync_osd_utilization(self):
+        from ceph_argparse import json_command
+        import rados
+        _conf_file = os.path.join(
+            "/etc/ceph",
+            NS.tendrl_context.cluster_name + ".conf"
+        )
+        cluster_handle = rados.Rados(
+            name=ceph.RADOS_NAME,
+            clustername=NS.tendrl_context.cluster_name,
+            conffile=_conf_file
+        )
+        cluster_handle.connect()
+        prefix = 'osd df'
+        out = ceph.rados_command(
+            cluster_handle,
+            prefix=prefix,
+            args={}
+        )
+        if out:
+            for entry in out['nodes']:
+                fetched_osd = NS.ceph.objects.Osd(id=entry['id']).load()
+                fetched_osd.total = entry['kb'] * 1024
+                fetched_osd.used = entry['kb_used'] * 1024
+                fetched_osd.used_pcnt = str(entry['utilization'])
+                fetched_osd.save()
+        cluster_handle.shutdown()
+
     def _sync_utilization(self):
         util_data = self._get_utilization_data()
         NS.ceph.objects.Utilization(
@@ -773,9 +804,13 @@ def on_sync_object(self, data):
                     }
                 )
             )
+            osd_host = socket.gethostbyaddr(
+                raw_osd['public_addr'].split(':')[0]
+            )[0]
             NS.ceph.objects.Osd(
                 id=raw_osd['osd'],
                 uuid=raw_osd['uuid'],
+                hostname=osd_host,
                 public_addr=raw_osd['public_addr'],
                 cluster_addr=raw_osd['cluster_addr'],
                 heartbeat_front_addr=raw_osd['heartbeat_front_addr'],
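
A note on _sync_osd_utilization() above: it depends on the JSON layout of the "osd df" mon command, which reports per-OSD sizes in KiB under a top-level "nodes" list. A small illustration of the field mapping; the sample values are made up:

    # Assumed shape of the "osd df" output consumed above; sizes in KiB.
    sample_out = {
        "nodes": [
            {"id": 0, "kb": 976762584, "kb_used": 4885612, "utilization": 0.5}
        ]
    }

    for entry in sample_out["nodes"]:
        updates = {
            "total": entry["kb"] * 1024,            # KiB -> bytes (Osd.total)
            "used": entry["kb_used"] * 1024,        # KiB -> bytes (Osd.used)
            "used_pcnt": str(entry["utilization"])  # stored as a string
        }
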
Timing out" % + self.parameters['Pool.pool_name'] + ) diff --git a/tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py b/tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py new file mode 100644 index 000000000..d2f164dc7 --- /dev/null +++ b/tendrl/ceph_integration/objects/rbd/atoms/check_rbd_available/__init__.py @@ -0,0 +1,43 @@ +import etcd +import gevent + +from tendrl.commons.event import Event +from tendrl.commons.message import Message +from tendrl.commons import objects +from tendrl.commons.objects import AtomExecutionFailedError + + +class CheckRbdAvailable(objects.BaseAtom): + def __init__(self, *args, **kwargs): + super(CheckRbdAvailable, self).__init__(*args, **kwargs) + + def run(self): + retry_count = 0 + while True: + try: + NS.ceph.objects.Rbd( + pool_id=self.parameters['Rbd.pool_id'], + name=self.parameters['Rbd.name'] + ).load() + return True + except etcd.EtcdKeyNotFound: + retry_count += 1 + gevent.sleep(1) + if retry_count == 600: + Event( + Message( + priority="error", + publisher=NS.publisher_id, + payload={ + "message": "Rbd %s not reflected in tendrl yet. Timing out" % + self.parameters['Rbd.name'] + }, + job_id=self.parameters['job_id'], + flow_id=self.parameters['flow_id'], + cluster_id=NS.tendrl_context.integration_id, + ) + ) + raise AtomExecutionFailedError( + "Rbd %s not reflected in tendrl yet. Timing out" % + self.parameters['Rbd.name'] + ) From 4c368c4028d652526eaf78ee84b657e54abbc7ee Mon Sep 17 00:00:00 2001 From: Shubhendu Date: Thu, 15 Jun 2017 17:02:12 +0530 Subject: [PATCH 02/13] Added utilization, device path and hostname fields for OSD Also added the syncing logic for instant values of utilization tendrl-bug-id: Tendrl/ceph-integration#288 Signed-off-by: Shubhendu --- .../objects/definition/ceph.yaml | 15 +++++ .../ceph_integration/objects/osd/__init__.py | 62 +++++++++---------- tendrl/ceph_integration/sds_sync/__init__.py | 35 +++++++++++ 3 files changed, 81 insertions(+), 31 deletions(-) diff --git a/tendrl/ceph_integration/objects/definition/ceph.yaml b/tendrl/ceph_integration/objects/definition/ceph.yaml index bf1edfa61..942718647 100644 --- a/tendrl/ceph_integration/objects/definition/ceph.yaml +++ b/tendrl/ceph_integration/objects/definition/ceph.yaml @@ -565,12 +565,18 @@ namespace.ceph: uuid: help: UUID type: String + hostname: + help: Name of the host + type: String public_addr: help: Public Address type: String cluster_addr: help: Cluster Address type: String + device_path: + help: device path + type: String heartbeat_front_addr: help: Heartbeat front address type: String @@ -610,6 +616,15 @@ namespace.ceph: last_clean_end: help: Last clean end type: int + total: + help: total size of osd + type: int + used: + help: used size + type: int + used_pcnt: + help: used percent + type: String help: "Osd" enabled: true value: clusters/$TendrlContext.integration_id/Osds/$Osd.uuid diff --git a/tendrl/ceph_integration/objects/osd/__init__.py b/tendrl/ceph_integration/objects/osd/__init__.py index e3dc8e340..c49cafd5d 100644 --- a/tendrl/ceph_integration/objects/osd/__init__.py +++ b/tendrl/ceph_integration/objects/osd/__init__.py @@ -1,39 +1,39 @@ -from tendrl.commons import objects +from tendrl.integrations.ceph.objects import osd -class Osd(objects.BaseObject): +class Osd(osd.Osd): def __init__(self, id=None, - uuid=None, public_addr=None, cluster_addr=None, - heartbeat_front_addr=None, heartbeat_back_addr=None, + uuid=None, hostname=None, public_addr=None, cluster_addr=None, + device_path=None, 
From b5dfe3a5a77db9f5f50f95df4242060042df3f20 Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sat, 17 Jun 2017 00:47:14 +0530
Subject: [PATCH 04/13] Update __init__.py

---
 .../atoms/check_pool_available/__init__.py | 22 +++++++++++++------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py b/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
index 1667dae37..e8b9d3ec2 100644
--- a/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
+++ b/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
@@ -1,3 +1,4 @@
+import etcd
 import gevent

 from tendrl.ceph_integration.objects.pool import Pool
@@ -14,13 +15,20 @@ def __init__(self, *args, **kwargs):
     def run(self):
         retry_count = 0
         while True:
-            pools = NS._int.client.read(
-                "clusters/%s/Pools" % NS.tendrl_context.integration_id
-            )
-            for entry in pools.leaves:
-                pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
-                if pool.pool_name == self.parameters['Pool.poolname']:
-                    return True
+            pools = None
+            try:
+                pools = NS._int.client.read(
+                    "clusters/%s/Pools" % NS.tendrl_context.integration_id
+                )
+            except etcd.EtcdKeyNotFound:
+                pass
+
+            if pools:
+                for entry in pools.leaves:
+                    pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
+                    if pool.pool_name == self.parameters['Pool.poolname']:
+                        return True
+
             retry_count += 1
             gevent.sleep(1)
             if retry_count == 600:

From 011a4f593a7d002dcb7e518dd9987d362818016c Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sat, 17 Jun 2017 00:51:08 +0530
Subject: [PATCH 05/13] Update __init__.py

---
 .../objects/pool/atoms/check_pool_available/__init__.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py b/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
index e8b9d3ec2..9d234bf07 100644
--- a/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
+++ b/tendrl/ceph_integration/objects/pool/atoms/check_pool_available/__init__.py
@@ -25,9 +25,12 @@ def run(self):

             if pools:
                 for entry in pools.leaves:
-                    pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
-                    if pool.pool_name == self.parameters['Pool.poolname']:
-                        return True
+                    try:
+                        pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
+                        if pool.pool_name == self.parameters['Pool.poolname']:
+                            return True
+                    except etcd.EtcdKeyNotFound:
+                        continue

             retry_count += 1
             gevent.sleep(1)
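
A note on patches 04 and 05: together they guard two distinct races. The Pools directory may not exist yet when the atom first polls, and a pool key listed at one moment can be deleted before it is loaded. A condensed sketch of the hardened lookup (Pool and NS as in the module being patched; an illustration, not the committed code):

    import etcd

    def find_pool(name):
        try:
            pools = NS._int.client.read(
                "clusters/%s/Pools" % NS.tendrl_context.integration_id
            )
        except etcd.EtcdKeyNotFound:
            return False  # directory not created yet (patch 04)
        for entry in pools.leaves:
            try:
                pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
            except etcd.EtcdKeyNotFound:
                continue  # key vanished between list and load (patch 05)
            if pool.pool_name == name:
                return True
        return False
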
From 4fb8f42e297766a5d2aa3c882897777a422fb92e Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sun, 18 Jun 2017 05:12:43 +0530
Subject: [PATCH 06/13] provide config flag to turn on profiling

tendrl-bug-id: Tendrl/commons#612
---
 tendrl/ceph_integration/manager/__init__.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tendrl/ceph_integration/manager/__init__.py b/tendrl/ceph_integration/manager/__init__.py
index c503af940..29a01fac7 100644
--- a/tendrl/ceph_integration/manager/__init__.py
+++ b/tendrl/ceph_integration/manager/__init__.py
@@ -80,6 +80,10 @@ def main():
     NS.tendrl_context.save()
     NS.ceph.definitions.save()
     NS.ceph.config.save()
+
+    if NS.config.data.get("with_internal_profiling", False):
+        from tendrl.commons import profiler
+        profiler.start()
     m = CephIntegrationManager()
     m.start()

From b9e54e8af77fa6566021be6baaf1c171f396888a Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sun, 18 Jun 2017 05:14:10 +0530
Subject: [PATCH 07/13] provide config flag to turn on profiling

tendrl-bug-id: Tendrl/commons#612
---
 etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample | 1 +
 1 file changed, 1 insertion(+)

diff --git a/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample b/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
index 288947ed0..267b241a9 100644
--- a/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
+++ b/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
@@ -4,3 +4,4 @@ tendrl_ansible_exec_file: $HOME/.tendrl/node-agent/ansible_exec
 log_cfg_path: /etc/tendrl/ceph-integration/ceph-integration_logging.yaml
 log_level: DEBUG
 logging_socket_path: /var/run/tendrl/message.sock
+with_internal_profiling: False # This flag turns on internal profiling and saves stats at /var/lib/tendrl/profiling/node_agent/ on service stop (Note: profiling requires "pip install GreenletProfiler")

From 8eb604e258ce45b9cd1646e81157cdf27ef0d12c Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sun, 18 Jun 2017 15:37:52 +0530
Subject: [PATCH 08/13] Update ceph-integration.conf.yaml.sample

---
 .../ceph-integration/ceph-integration.conf.yaml.sample | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample b/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
index 267b241a9..90dfe737a 100644
--- a/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
+++ b/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
@@ -4,4 +4,7 @@ tendrl_ansible_exec_file: $HOME/.tendrl/node-agent/ansible_exec
 log_cfg_path: /etc/tendrl/ceph-integration/ceph-integration_logging.yaml
 log_level: DEBUG
 logging_socket_path: /var/run/tendrl/message.sock
-with_internal_profiling: False # This flag turns on internal profiling and saves stats at /var/lib/tendrl/profiling/node_agent/ on service stop (Note: profiling requires "pip install GreenletProfiler")
+
+# This flag turns on internal profiling and saves stats at /var/lib/tendrl/profiling/ceph_integration/ on service stop (Note: profiling requires "pip install GreenletProfiler")
+
+with_internal_profiling: False
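
A note on the profiling flag: tendrl.commons.profiler lives in the Tendrl/commons repository, so only the call site appears in this series. Assuming GreenletProfiler's yappi-style API (set_clock_type/start/stop and get_func_stats), a rough sketch of what such a helper could look like; the helper names and output path are illustrative, not the committed module:

    import GreenletProfiler

    def start():
        # CPU clock avoids counting time greenlets spend sleeping.
        GreenletProfiler.set_clock_type('cpu')
        GreenletProfiler.start()

    def stop(out_dir='/var/lib/tendrl/profiling/ceph_integration/'):
        GreenletProfiler.stop()
        stats = GreenletProfiler.get_func_stats()
        # Callgrind output can be inspected with kcachegrind.
        stats.save(out_dir + 'profile.callgrind', type='callgrind')
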
From 29e75b5abde02d4bb383697859e033204791787b Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sun, 18 Jun 2017 16:00:44 +0530
Subject: [PATCH 09/13] Multiple calls to ceph.objects.Definition.get_parsed_defs()

tendrl-bug-id: Tendrl/ceph-integration#296
---
 tendrl/ceph_integration/objects/definition/__init__.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tendrl/ceph_integration/objects/definition/__init__.py b/tendrl/ceph_integration/objects/definition/__init__.py
index 8ea9aec05..d7c5230bd 100644
--- a/tendrl/ceph_integration/objects/definition/__init__.py
+++ b/tendrl/ceph_integration/objects/definition/__init__.py
@@ -17,6 +17,9 @@ def __init__(self, *args, **kwargs):
         self.value = 'clusters/{0}/_NS/definitions'

     def get_parsed_defs(self):
+        if self._parsed_defs:
+            return self._parsed_defs
+
         self._parsed_defs = yaml.safe_load(self.data)
         return self._parsed_defs
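
A note on the get_parsed_defs() change: it is plain memoization, parsing the YAML once and serving the cached dict on every later call. A standalone illustration of the pattern:

    import yaml

    class Definition(object):
        def __init__(self, data):
            self.data = data
            self._parsed_defs = None

        def get_parsed_defs(self):
            # Truthiness test: an empty document would be re-parsed
            # each call, which is harmless for a non-empty definition.
            if self._parsed_defs:
                return self._parsed_defs
            self._parsed_defs = yaml.safe_load(self.data)
            return self._parsed_defs

    defn = Definition("a: 1")
    assert defn.get_parsed_defs() is defn.get_parsed_defs()  # parsed once
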
From 14119edc794f7fcbbe175aba4bbfd184ef127be8 Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Sun, 18 Jun 2017 19:31:43 +0530
Subject: [PATCH 10/13] Update ceph-integration.conf.yaml.sample

---
 etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample b/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
index 90dfe737a..06b134a74 100644
--- a/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
+++ b/etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample
@@ -4,7 +4,4 @@ tendrl_ansible_exec_file: $HOME/.tendrl/node-agent/ansible_exec
 log_cfg_path: /etc/tendrl/ceph-integration/ceph-integration_logging.yaml
 log_level: DEBUG
 logging_socket_path: /var/run/tendrl/message.sock
-
-# This flag turns on internal profiling and saves stats at /var/lib/tendrl/profiling/ceph_integration/ on service stop (Note: profiling requires "pip install GreenletProfiler")
-
 with_internal_profiling: False

From c79b9695c27a4afa3dea6488e22382cee4692e15 Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Mon, 19 Jun 2017 15:32:11 +0530
Subject: [PATCH 11/13] Update version.py

---
 version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/version.py b/version.py
index 8e3c933cd..98d186bed 100644
--- a/version.py
+++ b/version.py
@@ -1 +1 @@
-__version__ = '1.4.1'
+__version__ = '1.4.2'

From 455082bd6eb9adae3b360cf88c90f4de9635e2c9 Mon Sep 17 00:00:00 2001
From: Rohan Kanade
Date: Mon, 19 Jun 2017 15:32:52 +0530
Subject: [PATCH 12/13] Update tendrl-ceph-integration.spec

---
 tendrl-ceph-integration.spec | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tendrl-ceph-integration.spec b/tendrl-ceph-integration.spec
index b9be1d27c..547ee67ef 100644
--- a/tendrl-ceph-integration.spec
+++ b/tendrl-ceph-integration.spec
@@ -1,5 +1,5 @@
 Name: tendrl-ceph-integration
-Version: 1.4.1
+Version: 1.4.2
 Release: 1%{?dist}
 BuildArch: noarch
 Summary: Tendrl bridge for Ceph Storage
@@ -60,6 +60,9 @@ py.test -v tendrl/ceph_integration/tests || :
 %{_sysconfdir}/tendrl/ceph-integration/ceph-integration.conf.yaml

 %changelog
+* Mon Jun 19 2017 Rohan Kanade - 1.4.2-1
+- Release tendrl-ceph-integration v1.4.2
+
 * Thu Jun 08 2017 Rohan Kanade - 1.4.1-1
 - Release tendrl-ceph-integration v1.4.1

From 92577fa870b9cd0ccfd34144fb8047b22f172f43 Mon Sep 17 00:00:00 2001
From: Rishubh Jain
Date: Tue, 20 Jun 2017 12:02:38 +0530
Subject: [PATCH 13/13] Update Readme.rst

Some edits to make the text render properly
---
 README.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.rst b/README.rst
index 52661d919..ed8bbf19a 100644
--- a/README.rst
+++ b/README.rst
@@ -1,6 +1,6 @@
 ===============================
 ceph-integration
-================
+===============================

 Tendrl integration for Ceph Storage

@@ -20,13 +20,13 @@ Builds
 ------

 .. image:: https://travis-ci.org/Tendrl/ceph_integration.svg?branch=master
-   :target: https://travis-ci.org/Tendrl/ceph_integration
+   :target: https://travis-ci.org/Tendrl/ceph-integration

 Code Coverage
 -------------

 .. image:: https://coveralls.io/repos/github/Tendrl/ceph_integration/badge.svg?branch=master
-    :target: https://coveralls.io/github/Tendrl/ceph_integration?branch=master
+   :target: https://coveralls.io/github/Tendrl/ceph_integration?branch=master

 Developer/Install documentation
 -----------------------
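
A note on the README fix: reStructuredText requires a section overline/underline to be at least as long as the title text, so the 16-character underline beneath the 31-character overline broke rendering. The corrected heading:

    ===============================
    ceph-integration
    ===============================
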