 """
 
 import argparse
-import boto3
 from collections import Counter
+import json
 import logging
 import os
 import re
 import sys
 import uuid
 
+import boto3
 import openstack
 
 
 TESTCONTNAME = "scs-test-container"
+EC2MARKER = "TmpMandSvcTest"
 
 logger = logging.getLogger(__name__)
 mandatory_services = ["compute", "identity", "image", "network",
@@ -114,32 +116,48 @@ def s3_from_env(creds, fieldnm, env, prefix=""):
 
 
 def s3_from_ostack(creds, conn, endpoint):
-    "Set creds from openstack swift/keystone"
+    """Set creds from openstack swift/keystone
+    Returns credential ID *if* an ec2 credential was created,
+    None otherwise."""
     rgx = re.compile(r"^(https*://[^/]*)/")
     match = rgx.match(endpoint)
    if match:
         creds["HOST"] = match.group(1)
-    # Use first ec2 cred if one exists
+    # Use first ec2 cred that matches the project (if one exists)
+    project_id = conn.identity.get_project_id()
     ec2_creds = [cred for cred in conn.identity.credentials()
-                 if cred.type == "ec2"]
-    if len(ec2_creds):
-        # FIXME: Assume cloud is not evil
-        ec2_dict = eval(ec2_creds[0].blob, {"null": None})
-        creds["AK"] = ec2_dict["access"]
-        creds["SK"] = ec2_dict["secret"]
+                 if cred.type == "ec2" and cred.project_id == project_id]
+    found_ec2 = None
+    for cred in ec2_creds:
+        try:
+            ec2_dict = json.loads(cred.blob)
+        except Exception:
+            logger.warning(f"unable to parse credential {cred!r}", exc_info=True)
+            continue
+        # Clean up old EC2 creds and jump over
+        if ec2_dict.get("owner") == EC2MARKER:
+            logger.debug(f"Removing leftover credential {ec2_dict['access']}")
+            conn.identity.delete_credential(cred)
+            continue
+        found_ec2 = ec2_dict
+    if found_ec2:
+        creds["AK"] = found_ec2["access"]
+        creds["SK"] = found_ec2["secret"]
         return
     # Generate keyid and secret
     ak = uuid.uuid4().hex
     sk = uuid.uuid4().hex
-    blob = f'{{"access": "{ak}", "secret": "{sk}"}}'
+    blob = f'{{"access": "{ak}", "secret": "{sk}", "owner": "{EC2MARKER}"}}'
     try:
-        conn.identity.create_credential(type="ec2", blob=blob,
-                                        user_id=conn.current_user_id,
-                                        project_id=conn.current_project_id)
-        creds["AK"] = ak
-        creds["SK"] = sk
-    except BaseException as exc:
-        logger.warning(f"ec2 creds creation failed: {exc!s}")
+        crd = conn.identity.create_credential(type="ec2", blob=blob,
+                                              user_id=conn.current_user_id,
+                                              project_id=conn.current_project_id)
+    except BaseException:
+        logger.warning("ec2 creds creation failed", exc_info=True)
+        return
+    creds["AK"] = ak
+    creds["SK"] = sk
+    return crd.id
 
 
 def check_for_s3_and_swift(conn: openstack.connection.Connection, s3_credentials=None):
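
Note on the credential blob handling in the hunk above: the patch replaces the old eval() call with json.loads() and tags test-created credentials with an "owner" marker so leftovers from earlier runs can be recognized and removed. A minimal stand-alone sketch of that round trip, using the same blob format as the patch (everything outside EC2MARKER and the blob layout is illustrative only):

    import json
    import uuid

    EC2MARKER = "TmpMandSvcTest"

    # Build the blob the way s3_from_ostack() does: plain JSON carrying the
    # access key, the secret key and the marker identifying test credentials.
    ak = uuid.uuid4().hex
    sk = uuid.uuid4().hex
    blob = f'{{"access": "{ak}", "secret": "{sk}", "owner": "{EC2MARKER}"}}'

    # json.loads() only accepts JSON, so a malformed or malicious blob raises
    # an exception instead of executing code the way eval() could.
    ec2_dict = json.loads(blob)
    assert ec2_dict["access"] == ak
    assert ec2_dict.get("owner") == EC2MARKER
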
@@ -170,38 +188,45 @@ def check_for_s3_and_swift(conn: openstack.connection.Connection, s3_credentials
         )
         return 1
     # Get S3 endpoint (swift) and ec2 creds from OpenStack (keystone)
-    s3_from_ostack(s3_creds, conn, endpoint)
-    # Overrides (var names are from libs3, in case you wonder)
-    s3_from_env(s3_creds, "HOST", "S3_HOSTNAME", "https://")
-    s3_from_env(s3_creds, "AK", "S3_ACCESS_KEY_ID")
-    s3_from_env(s3_creds, "SK", "S3_SECRET_ACCESS_KEY")
-
-    # This is to be used for local debugging purposes ONLY
-    # logger.info(f"using credentials {s3_creds}")
-
-    s3 = s3_conn(s3_creds, conn)
-    s3_buckets = list_s3_buckets(s3) or create_bucket(s3, TESTCONTNAME)
-    if not s3_buckets:
-        raise RuntimeError("failed to create S3 bucket")
-
-    # If we got till here, s3 is working, now swift
-    swift_containers = list_containers(conn)
-    # if not swift_containers:
-    #     swift_containers = create_container(conn, TESTCONTNAME)
-    result = 0
-    if Counter(s3_buckets) != Counter(swift_containers):
-        logger.error("S3 buckets and Swift Containers differ:\n"
-                     f"S3: {sorted(s3_buckets)}\nSW: {sorted(swift_containers)}")
-        result = 1
-    else:
-        logger.info("SUCCESS: S3 and Swift exist and agree")
-    # Clean up
-    # FIXME: Cleanup created EC2 credential
-    # if swift_containers == [TESTCONTNAME]:
-    #     del_container(conn, TESTCONTNAME)
-    # Cleanup created S3 bucket
-    if s3_buckets == [TESTCONTNAME]:
-        del_bucket(s3, TESTCONTNAME)
+    try:
+        ec2_cred = s3_from_ostack(s3_creds, conn, endpoint)
+        # Overrides (var names are from libs3, in case you wonder)
+        s3_from_env(s3_creds, "HOST", "S3_HOSTNAME", "https://")
+        s3_from_env(s3_creds, "AK", "S3_ACCESS_KEY_ID")
+        s3_from_env(s3_creds, "SK", "S3_SECRET_ACCESS_KEY")
+
+        # This is to be used for local debugging purposes ONLY
+        # logger.info(f"using credentials {s3_creds}")
+
+        s3 = s3_conn(s3_creds, conn)
+        s3_buckets = list_s3_buckets(s3) or create_bucket(s3, TESTCONTNAME)
+        if not s3_buckets:
+            raise RuntimeError("failed to create S3 bucket")
+
+        # If we got till here, s3 is working, now swift
+        swift_containers = list_containers(conn)
+        # if not swift_containers:
+        #     swift_containers = create_container(conn, TESTCONTNAME)
+        result = 0
+        # Compare number of buckets/containers
+        # FIXME: Could compare list of sorted names
+        if Counter(s3_buckets) != Counter(swift_containers):
+            logger.error("S3 buckets and Swift Containers differ:\n"
+                         f"S3: {sorted(s3_buckets)}\nSW: {sorted(swift_containers)}")
+            result = 1
+        else:
+            logger.info("SUCCESS: S3 and Swift exist and agree")
+        # No need to clean up swift container, as we did not create one
+        # (If swift and S3 agree, there will be a S3 bucket that we clean up with S3.)
+        # if swift_containers == [TESTCONTNAME]:
+        #     del_container(conn, TESTCONTNAME)
+        # Cleanup created S3 bucket
+        if s3_buckets == [TESTCONTNAME]:
+            del_bucket(s3, TESTCONTNAME)
+    # Clean up ec2 cred IF we created one
+    finally:
+        if ec2_cred:
+            conn.identity.delete_credential(ec2_cred)
     return result
 
 
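
The try/finally block in the hunk above guarantees that a temporary EC2 credential created by s3_from_ostack() is deleted even when the S3/Swift checks fail. A small sketch of that pattern, with ec2_cred pre-initialized so the finally clause cannot hit a NameError if credential creation itself raises (the helper names mirror the patch; the wrapper function is illustrative only):

    def run_s3_swift_check(conn, s3_creds, endpoint):
        # Pre-initialize so the finally block is safe even if
        # s3_from_ostack() raises before returning.
        ec2_cred = None
        try:
            # Returns a credential ID only if a new EC2 credential was created.
            ec2_cred = s3_from_ostack(s3_creds, conn, endpoint)
            ...  # S3 and Swift checks as in the patch
        finally:
            # Remove only what this run created; pre-existing creds are kept.
            if ec2_cred:
                conn.identity.delete_credential(ec2_cred)
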