
Commit 1cb19ac

Merge branch 'develop' into librbd
2 parents: ff9d753 + 071b28f

12 files changed (+274, -37 lines)


README.rst

Lines changed: 3 additions & 3 deletions
@@ -1,6 +1,6 @@
 ===============================
 ceph-integration
-================
+===============================
 
 Tendrl integration for Ceph Storage
 

@@ -20,13 +20,13 @@ Builds
 ------
 
 .. image:: https://travis-ci.org/Tendrl/ceph_integration.svg?branch=master
-   :target: https://travis-ci.org/Tendrl/ceph_integration
+   :target: https://travis-ci.org/Tendrl/ceph-integration
 
 Code Coverage
 -------------
 
 .. image:: https://coveralls.io/repos/github/Tendrl/ceph_integration/badge.svg?branch=master
-   :target: https://coveralls.io/github/Tendrl/ceph_integration?branch=master
+   :target: https://coveralls.io/github/Tendrl/ceph_integration?branch=master
 
 Developer/Install documentation
 -----------------------

etc/tendrl/ceph-integration/ceph-integration.conf.yaml.sample

Lines changed: 1 addition & 0 deletions
@@ -4,3 +4,4 @@ tendrl_ansible_exec_file: $HOME/.tendrl/node-agent/ansible_exec
 log_cfg_path: /etc/tendrl/ceph-integration/ceph-integration_logging.yaml
 log_level: DEBUG
 logging_socket_path: /var/run/tendrl/message.sock
+with_internal_profiling: False

tendrl-ceph-integration.spec

Lines changed: 4 additions & 1 deletion
@@ -1,5 +1,5 @@
 Name: tendrl-ceph-integration
-Version: 1.4.1
+Version: 1.4.2
 Release: 1%{?dist}
 BuildArch: noarch
 Summary: Tendrl bridge for Ceph Storage

@@ -60,6 +60,9 @@ py.test -v tendrl/ceph_integration/tests || :
 %{_sysconfdir}/tendrl/ceph-integration/ceph-integration.conf.yaml
 
 %changelog
+* Mon Jun 19 2017 Rohan Kanade <[email protected]> - 1.4.2-1
+- Release tendrl-ceph-integration v1.4.2
+
 * Thu Jun 08 2017 Rohan Kanade <[email protected]> - 1.4.1-1
 - Release tendrl-ceph-integration v1.4.1
 

tendrl/ceph_integration/manager/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -80,6 +80,10 @@ def main():
     NS.tendrl_context.save()
     NS.ceph.definitions.save()
     NS.ceph.config.save()
+
+    if NS.config.data.get("with_internal_profiling", False):
+        from tendrl.commons import profiler
+        profiler.start()
 
     m = CephIntegrationManager()
     m.start()
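
Together with the conf.yaml.sample hunk above, this makes profiling opt-in: the key is read with a False default and the profiler module is only imported once it is enabled. A minimal sketch of the same guard, assuming only dict-like config data (maybe_start_profiler is an illustrative helper, not Tendrl API):

def maybe_start_profiler(config_data):
    # A missing key falls back to False, so profiling stays disabled
    # unless explicitly switched on in the deployed config.
    if config_data.get("with_internal_profiling", False):
        # Lazy import: the profiler is only loaded when opted in.
        from tendrl.commons import profiler
        profiler.start()

# Disabled by default; nothing is imported or started.
maybe_start_profiler({})
# Enabled by setting the sample key to True in
# /etc/tendrl/ceph-integration/ceph-integration.conf.yaml:
#     with_internal_profiling: True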

tendrl/ceph_integration/objects/definition/__init__.py

Lines changed: 3 additions & 0 deletions
@@ -17,6 +17,9 @@ def __init__(self, *args, **kwargs):
         self.value = 'clusters/{0}/_NS/definitions'
 
     def get_parsed_defs(self):
+        if self._parsed_defs:
+            return self._parsed_defs
+
         self._parsed_defs = yaml.safe_load(self.data)
         return self._parsed_defs
 
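
The added guard memoizes the parsed definitions, so the YAML document is deserialized at most once per object rather than on every get_parsed_defs() call. The same pattern in isolation (a sketch; the Definitions class here is illustrative):

import yaml


class Definitions(object):
    def __init__(self, data):
        self.data = data
        self._parsed_defs = None

    def get_parsed_defs(self):
        # Serve the cached parse when present; re-running yaml.safe_load
        # over a large definitions document on each call is wasted work.
        if self._parsed_defs:
            return self._parsed_defs
        self._parsed_defs = yaml.safe_load(self.data)
        return self._parsed_defs


defs = Definitions("namespace.ceph:\n  flows: {}\n")
assert defs.get_parsed_defs() is defs.get_parsed_defs()  # parsed only once

One caveat of the truthiness test: a document that parses to an empty mapping would be re-parsed on every call; checking self._parsed_defs is not None would cache that case as well.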

tendrl/ceph_integration/objects/definition/ceph.yaml

Lines changed: 53 additions & 1 deletion
@@ -21,6 +21,8 @@ namespace.ceph:
       - Pool.quota_max_bytes
       pre_run:
       - ceph.objects.Pool.atoms.NamedPoolNotExists
+      post_run:
+      - ceph.objects.Pool.atoms.CheckPoolAvailable
       run: ceph.flows.CreatePool
       type: Create
       uuid: faeab231-69e9-4c9d-b5ef-a67ed057f98b

@@ -41,6 +43,8 @@ namespace.ceph:
       - ECProfile.directory
       - ECProfile.ruleset_failure_domain
       run: ceph.flows.CreateECProfile
+      post_run:
+      - ceph.objects.ECProfile.atoms.CheckECProfileAvailable
       type: Create
       uuid: faeab231-69e9-4c9d-b5ef-a67ed057f98d
   objects:

@@ -102,6 +106,16 @@ namespace.ceph:
          run: ceph.objects.ECProfile.atoms.Delete
          type: Delete
          uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3740
+        CheckECProfileAvailable:
+          enabled: true
+          help: Check if ECProfile available
+          inputs:
+            mandatory:
+            - ECProfile.name
+          name: ECProfile available
+          run: ceph.objects.ECProfile.atoms.CheckECProfileAvailable
+          type: Check
+          uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3759
       flows:
         DeleteECProfile:
           tags:

@@ -212,6 +226,17 @@ namespace.ceph:
          run: ceph.objects.Rbd.atoms.RbdExists
          type: Get
          uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3922
+        CheckRbdAvailable:
+          enabled: true
+          help: Check if rbd is available
+          inputs:
+            mandatory:
+            - Rbd.pool_id
+            - Rbd.name
+          name: Rbd available
+          run: ceph.objects.Rbd.atoms.CheckRbdAvailable
+          type: Check
+          uuid: 7a2df258-9b24-4fd3-a66f-ee346e2e3929
       flows:
         CreateRbd:
           tags:

@@ -239,6 +264,8 @@ namespace.ceph:
           - Rbd.pool_quota_max_bytes
           pre_run:
           - ceph.objects.Rbd.atoms.RbdNotExists
+          post_run:
+          - ceph.objects.Rbd.atoms.CheckRbdAvailable
           run: ceph.objects.Rbd.flows.CreateRbd
           type: Create
           uuid: 9bc41d8f-a0cf-420a-b2fe-18761e07f3d2

@@ -442,6 +469,16 @@ namespace.ceph:
          run: ceph.objects.Pool.atoms.PoolNotExists
          type: Get
          uuid: 9a2df258-9b24-4fd3-a66f-ee346e2e3832
+        CheckPoolAvailable:
+          enabled: true
+          help: check if pool available
+          inputs:
+            mandatory:
+            - Pool.poolname
+          name: pool is available
+          run: ceph.objects.Pool.atoms.CheckPoolAvailable
+          type: Check
+          uuid: 9a2df258-9b24-4fd3-a66f-ee346e2e3839
       flows:
         DeletePool:
           tags:

@@ -565,12 +602,18 @@ namespace.ceph:
         uuid:
           help: UUID
           type: String
+        hostname:
+          help: Name of the host
+          type: String
         public_addr:
           help: Public Address
           type: String
         cluster_addr:
           help: Cluster Address
           type: String
+        device_path:
+          help: device path
+          type: String
         heartbeat_front_addr:
           help: Heartbeat front address
           type: String

@@ -610,8 +653,17 @@ namespace.ceph:
         last_clean_end:
           help: Last clean end
           type: int
+        total:
+          help: total size of osd
+          type: int
+        used:
+          help: used size
+          type: int
+        used_pcnt:
+          help: used percent
+          type: String
       help: "Osd"
       enabled: true
-      value: clusters/$TendrlContext.integration_id/Osds/$Osd.uuid
+      value: clusters/$TendrlContext.integration_id/Osds/$Osd.id
       list: clusters/$TendrlContext.integration_id/Osds
 tendrl_schema_version: 0.3
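
These definition changes attach post_run check atoms to the create flows, pairing the existing pre_run existence guards with a wait-until-visible check after the flow body. The exact orchestration lives in tendrl-commons and is not part of this diff; a rough sketch of the assumed phase ordering (all names illustrative):

def run_flow(flow, parameters):
    # Assumed sequencing: every pre_run atom must pass before the flow
    # body runs, and every post_run atom must pass before the flow is
    # reported successful.
    for atom_cls in flow.pre_run:    # e.g. Pool.atoms.NamedPoolNotExists
        if not atom_cls(parameters).run():
            raise RuntimeError("pre_run atom failed")
    flow.run(parameters)             # e.g. ceph.flows.CreatePool
    for atom_cls in flow.post_run:   # e.g. Pool.atoms.CheckPoolAvailable
        if not atom_cls(parameters).run():
            raise RuntimeError("post_run atom failed")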
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+import etcd
+import gevent
+
+from tendrl.commons.event import Event
+from tendrl.commons.message import Message
+from tendrl.commons import objects
+from tendrl.commons.objects import AtomExecutionFailedError
+
+
+class CheckECProfileAvailable(objects.BaseAtom):
+    def __init__(self, *args, **kwargs):
+        super(CheckECProfileAvailable, self).__init__(*args, **kwargs)
+
+    def run(self):
+        retry_count = 0
+        while True:
+            try:
+                NS.ceph.objects.ECProfile(
+                    name=self.parameters['ECProfile.name']
+                ).load()
+                return True
+            except etcd.EtcdKeyNotFound:
+                retry_count += 1
+                gevent.sleep(1)
+                if retry_count == 600:
+                    Event(
+                        Message(
+                            priority="error",
+                            publisher=NS.publisher_id,
+                            payload={
+                                "message": "ECProfile %s not reflected in tendrl yet. Timing out" %
+                                self.parameters['ECProfile.name']
+                            },
+                            job_id=self.parameters['job_id'],
+                            flow_id=self.parameters['flow_id'],
+                            cluster_id=NS.tendrl_context.integration_id,
+                        )
+                    )
+                    raise AtomExecutionFailedError(
+                        "ECProfile %s not reflected in tendrl yet. Timing out" %
+                        self.parameters['ECProfile.name']
+                    )
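
This atom and the CheckPoolAvailable/CheckRbdAvailable atoms added later in this commit share one shape: poll the central store once per second and give up after 600 attempts, a budget of roughly ten minutes. The pattern in isolation (a sketch, not Tendrl code; wait_until_visible is illustrative):

import time


def wait_until_visible(load, attempts=600, interval=1.0):
    # load() raises a lookup error until the object appears in the store;
    # the total budget is attempts * interval seconds (600 x 1s here).
    for _ in range(attempts):
        try:
            load()
            return True
        except LookupError:
            time.sleep(interval)  # the atoms use gevent.sleep(1) to yield
    raise TimeoutError("object not reflected in the store yet")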
Lines changed: 31 additions & 31 deletions
@@ -1,39 +1,39 @@
-from tendrl.commons import objects
+from tendrl.integrations.ceph.objects import osd
 
 
-class Osd(objects.BaseObject):
+class Osd(osd.Osd):
     def __init__(self, id=None,
-                 uuid=None, public_addr=None, cluster_addr=None,
-                 heartbeat_front_addr=None, heartbeat_back_addr=None,
+                 uuid=None, hostname=None, public_addr=None, cluster_addr=None,
+                 device_path=None, heartbeat_front_addr=None, heartbeat_back_addr=None,
                  down_at=None, up_from=None, lost_at=None,
                  osd_up=None, osd_in=None, up_thru=None,
                  weight=None, primary_affinity=None,
                  state=None, last_clean_begin=None,
-                 last_clean_end=None, *args, **kwargs):
-        super(Osd, self).__init__(*args, **kwargs)
-
-        self.id = id
-        self.uuid = uuid
-        self.public_addr = public_addr
-        self.cluster_addr = cluster_addr
-        self.heartbeat_front_addr = heartbeat_front_addr
-        self.heartbeat_back_addr = heartbeat_back_addr
-        self.down_at = down_at
-        self.up_from = up_from
-        self.lost_at = lost_at
-        self.osd_up = osd_up
-        self.osd_in = osd_in
-        self.up_thru = up_thru
-        self.weight = weight
-        self.primary_affinity = primary_affinity
-        self.state = state
-        self.last_clean_begin = last_clean_begin
-        self.last_clean_end = last_clean_end
-        self.value = 'clusters/{0}/Osds/{1}'
-
-    def render(self):
-        self.value = self.value.format(
-            NS.tendrl_context.integration_id,
-            self.uuid
+                 last_clean_end=None, total=None, used=None, used_pcnt=None,
+                 *args, **kwargs):
+        super(Osd, self).__init__(
+            id=id,
+            uuid=uuid,
+            hostname=hostname,
+            public_addr=public_addr,
+            cluster_addr=cluster_addr,
+            device_path=device_path,
+            heartbeat_front_addr=heartbeat_front_addr,
+            heartbeat_back_addr=heartbeat_back_addr,
+            down_at=down_at,
+            up_from=up_from,
+            lost_at=lost_at,
+            osd_up=osd_up,
+            osd_in=osd_in,
+            up_thru=up_thru,
+            weight=weight,
+            primary_affinity=primary_affinity,
+            state=state,
+            last_clean_begin=last_clean_begin,
+            last_clean_end=last_clean_end,
+            total=total,
+            used=used,
+            used_pcnt=used_pcnt,
+            *args,
+            **kwargs
         )
-        return super(Osd, self).render()
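
The rewritten class is now a thin shim over the shared OSD object in tendrl.integrations: attribute handling, the etcd key template, and render() all move to the base class, and the ceph.yaml hunk above correspondingly re-keys OSDs under $Osd.id instead of $Osd.uuid. The shape of the refactor, reduced to a sketch (the base-class internals are assumptions; they are not shown in this diff):

class BaseOsd(object):
    # Assumed base: owns the attributes and the etcd key template that
    # the integration-specific subclass used to duplicate.
    def __init__(self, id=None, hostname=None, total=None, **attrs):
        self.id = id
        self.hostname = hostname
        self.total = total
        self.value = 'clusters/{0}/Osds/{1}'


class Osd(BaseOsd):
    # The subclass only forwards its keyword arguments upward; it keeps
    # no local state and no longer overrides render().
    def __init__(self, id=None, hostname=None, total=None, **attrs):
        super(Osd, self).__init__(id=id, hostname=hostname, total=total,
                                  **attrs)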
Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
+import etcd
+import gevent
+
+from tendrl.ceph_integration.objects.pool import Pool
+from tendrl.commons.event import Event
+from tendrl.commons.message import Message
+from tendrl.commons import objects
+from tendrl.commons.objects import AtomExecutionFailedError
+
+
+class CheckPoolAvailable(objects.BaseAtom):
+    def __init__(self, *args, **kwargs):
+        super(CheckPoolAvailable, self).__init__(*args, **kwargs)
+
+    def run(self):
+        retry_count = 0
+        while True:
+            pools = None
+            try:
+                pools = NS._int.client.read(
+                    "clusters/%s/Pools" % NS.tendrl_context.integration_id
+                )
+            except etcd.EtcdKeyNotFound:
+                pass
+
+            if pools:
+                for entry in pools.leaves:
+                    try:
+                        pool = Pool(pool_id=entry.key.split("Pools/")[-1]).load()
+                        if pool.pool_name == self.parameters['Pool.poolname']:
+                            return True
+                    except etcd.EtcdKeyNotFound:
+                        continue
+
+            retry_count += 1
+            gevent.sleep(1)
+            if retry_count == 600:
+                Event(
+                    Message(
+                        priority="error",
+                        publisher=NS.publisher_id,
+                        payload={
+                            "message": "Pool %s not reflected in tendrl yet. Timing out" %
+                            self.parameters['Pool.poolname']
+                        },
+                        job_id=self.parameters['job_id'],
+                        flow_id=self.parameters['flow_id'],
+                        cluster_id=NS.tendrl_context.integration_id,
+                    )
+                )
+                raise AtomExecutionFailedError(
+                    "Pool %s not reflected in tendrl yet. Timing out" %
+                    self.parameters['Pool.poolname']
+                )
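
Unlike the ECProfile and Rbd checks, which load a single key directly, this atom has to enumerate clusters/<integration_id>/Pools and match on pool_name, presumably because the new pool's numeric id is assigned by Ceph and is not known to the flow up front. Note that the atom's mandatory input (and the key it reads elsewhere in run()) is Pool.poolname, which is why the timeout messages use that parameter as well.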
Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
+import etcd
+import gevent
+
+from tendrl.commons.event import Event
+from tendrl.commons.message import Message
+from tendrl.commons import objects
+from tendrl.commons.objects import AtomExecutionFailedError
+
+
+class CheckRbdAvailable(objects.BaseAtom):
+    def __init__(self, *args, **kwargs):
+        super(CheckRbdAvailable, self).__init__(*args, **kwargs)
+
+    def run(self):
+        retry_count = 0
+        while True:
+            try:
+                NS.ceph.objects.Rbd(
+                    pool_id=self.parameters['Rbd.pool_id'],
+                    name=self.parameters['Rbd.name']
+                ).load()
+                return True
+            except etcd.EtcdKeyNotFound:
+                retry_count += 1
+                gevent.sleep(1)
+                if retry_count == 600:
+                    Event(
+                        Message(
+                            priority="error",
+                            publisher=NS.publisher_id,
+                            payload={
+                                "message": "Rbd %s not reflected in tendrl yet. Timing out" %
+                                self.parameters['Rbd.name']
+                            },
+                            job_id=self.parameters['job_id'],
+                            flow_id=self.parameters['flow_id'],
+                            cluster_id=NS.tendrl_context.integration_id,
+                        )
+                    )
+                    raise AtomExecutionFailedError(
+                        "Rbd %s not reflected in tendrl yet. Timing out" %
+                        self.parameters['Rbd.name']
+                    )
