@@ -115,8 +115,8 @@ def _location():
         -------
         valid http location
         """
-        _emulator_location = os.getenv("STORAGE_EMULATOR_HOST", None)
-        if _emulator_location:
+        _emulator_location = os.getenv("STORAGE_EMULATOR_HOST", "")
+        if _emulator_location not in {"default", "", None}:
             if not any(
                 _emulator_location.startswith(scheme) for scheme in ("http://", "https://")
             ):
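The change treats `"default"` and the empty string as "no emulator", so only a real host value reroutes traffic. A minimal usage sketch against a local emulator such as fake-gcs-server (the host/port and bucket name here are made up):

```python
import os

import gcsfs

# Any non-empty value other than "default" now counts as an emulator host;
# a bare host:port is accepted because _location() prepends "http://" itself.
os.environ["STORAGE_EMULATOR_HOST"] = "localhost:4443"

fs = gcsfs.GCSFileSystem(token="anon")  # anonymous: no credential lookup
print(fs.ls("test-bucket"))  # served by the emulator, not by GCS
```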
@@ -222,6 +222,10 @@ class GCSFileSystem(asyn.AsyncFileSystem):
     In the default case the cache is never expired. This may be controlled via the ``cache_timeout``
     GCSFileSystem parameter or via explicit calls to ``GCSFileSystem.invalidate_cache``.

+    NOTE on "exclusive" mode: mode="create" (in pipe and put) and open(mode="xb") are supported on an
+    experimental basis. The test harness does not currently support this, so use at your
+    own risk.
+
     Parameters
     ----------
     project : string
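A quick usage sketch of the exclusive-create entry points described in the note (bucket and keys are hypothetical; the sync `pipe_file`/`open` wrappers delegate to the async methods changed below):

```python
import gcsfs

fs = gcsfs.GCSFileSystem()

# one-shot write that must not replace an existing object
fs.pipe_file("my-bucket/fresh-key", b"payload", mode="create")

# file-like equivalent: "xb" selects exclusive create internally
with fs.open("my-bucket/other-key", "xb") as f:
    f.write(b"payload")
```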
@@ -1332,13 +1336,14 @@ async def _pipe_file(
         content_type="application/octet-stream",
         fixed_key_metadata=None,
         chunksize=50 * 2**20,
+        mode="overwrite",
     ):
         # enforce blocksize should be a multiple of 2**18
         consistency = consistency or self.consistency
         bucket, key, generation = self.split_path(path)
         size = len(data)
         out = None
-        if size < 5 * 2**20:
+        if size < chunksize:
             location = await simple_upload(
                 self,
                 bucket,
@@ -1348,6 +1353,7 @@ async def _pipe_file(
                 consistency,
                 content_type,
                 fixed_key_metadata=fixed_key_metadata,
+                mode=mode,
             )
         else:
             location = await initiate_upload(
@@ -1357,12 +1363,20 @@ async def _pipe_file(
                 content_type,
                 metadata,
                 fixed_key_metadata=fixed_key_metadata,
+                mode=mode,
             )
-            for offset in range(0, len(data), chunksize):
-                bit = data[offset : offset + chunksize]
-                out = await upload_chunk(
-                    self, location, bit, offset, size, content_type
+            try:
+                for offset in range(0, len(data), chunksize):
+                    bit = data[offset : offset + chunksize]
+                    out = await upload_chunk(
+                        self, location, bit, offset, size, content_type
+                    )
+            except Exception:
+                await self._call(
+                    "DELETE",
+                    location.replace("&ifGenerationMatch=0", ""),
                 )
+                raise

         checker = get_consistency_checker(consistency)
         checker.update(data)
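The cleanup path relies on the resumable-session URI returned by `initiate_upload`: under `mode="create"` that URI carries the `&ifGenerationMatch=0` query parameter, which is stripped before the DELETE, apparently so the cancel request is not itself subject to the precondition. Illustrative shape of the two URLs (the upload id is invented):

```python
# what `location` looks like when the session was opened with mode="create"
location = (
    "https://storage.googleapis.com/upload/storage/v1/b/my-bucket/o"
    "?uploadType=resumable&upload_id=XYZ123&ifGenerationMatch=0"
)

# URL actually used to cancel the session if a chunk upload fails
cancel = location.replace("&ifGenerationMatch=0", "")
```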
@@ -1381,6 +1395,7 @@ async def _put_file(
         chunksize=50 * 2**20,
         callback=None,
         fixed_key_metadata=None,
+        mode="overwrite",
         **kwargs,
     ):
         # enforce blocksize should be a multiple of 2**18
@@ -1407,6 +1422,7 @@ async def _put_file(
                 metadatain=metadata,
                 content_type=content_type,
                 fixed_key_metadata=fixed_key_metadata,
+                mode=mode,
             )
             callback.absolute_update(size)

@@ -1418,18 +1434,26 @@ async def _put_file(
                 content_type,
                 metadata=metadata,
                 fixed_key_metadata=fixed_key_metadata,
+                mode=mode,
             )
             offset = 0
-            while True:
-                bit = f0.read(chunksize)
-                if not bit:
-                    break
-                out = await upload_chunk(
-                    self, location, bit, offset, size, content_type
+            try:
+                while True:
+                    bit = f0.read(chunksize)
+                    if not bit:
+                        break
+                    out = await upload_chunk(
+                        self, location, bit, offset, size, content_type
+                    )
+                    offset += len(bit)
+                    callback.absolute_update(offset)
+                    checker.update(bit)
+            except Exception:
+                await self._call(
+                    "DELETE",
+                    location.replace("&ifGenerationMatch=0", ""),
                 )
-                offset += len(bit)
-                callback.absolute_update(offset)
-                checker.update(bit)
+                raise

         checker.validate_json_response(out)
@@ -1780,7 +1804,7 @@ def __init__(
         self.fixed_key_metadata = _convert_fixed_key_metadata(det, from_google=True)
         self.fixed_key_metadata.update(fixed_key_metadata or {})
         self.timeout = timeout
-        if mode == "wb":
+        if mode in {"wb", "xb"}:
             if self.blocksize < GCS_MIN_BLOCK_SIZE:
                 warnings.warn("Setting block size to minimum value, 2**18")
                 self.blocksize = GCS_MIN_BLOCK_SIZE
@@ -1886,6 +1910,7 @@ def _initiate_upload(self):
             self.content_type,
             self.metadata,
             self.fixed_key_metadata,
+            mode="create" if "x" in self.mode else "overwrite",
             timeout=self.timeout,
         )
@@ -1898,7 +1923,7 @@ def discard(self):
             return
         self.gcsfs.call(
             "DELETE",
-            self.location,
+            self.location.replace("&ifGenerationMatch=0", ""),
         )
         self.location = None
         self.closed = True
@@ -1918,6 +1943,7 @@ def _simple_upload(self):
             self.consistency,
             self.content_type,
             self.fixed_key_metadata,
+            mode="create" if "x" in self.mode else "overwrite",
             timeout=self.timeout,
         )
@@ -1989,17 +2015,20 @@ async def initiate_upload(
     content_type="application/octet-stream",
     metadata=None,
     fixed_key_metadata=None,
+    mode="overwrite",
 ):
     j = {"name": key}
     if metadata:
         j["metadata"] = metadata
+    kw = {"ifGenerationMatch": "0"} if mode == "create" else {}
     j.update(_convert_fixed_key_metadata(fixed_key_metadata))
     headers, _ = await fs._call(
         method="POST",
-        path=f"{fs._location}/upload/storage/v1/b/{quote(bucket)}/o",
+        path=f"{fs._location}/upload/storage/v1/b/{quote(bucket)}/o?name={quote(key)}",
         uploadType="resumable",
         json=j,
         headers={"X-Upload-Content-Type": content_type},
+        **kw,
     )
     loc = headers["Location"]
     out = loc[0] if isinstance(loc, list) else loc  # <- for CVR responses
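`ifGenerationMatch=0` is the standard GCS precondition meaning "proceed only if no live generation of the object exists"; when the object is already present the API answers 412 Precondition Failed. Roughly what a caller of the new mode sees (the exception type is indicative only, depending on how gcsfs maps HTTP errors):

```python
fs.pipe_file("my-bucket/key", b"first", mode="create")       # created
try:
    fs.pipe_file("my-bucket/key", b"second", mode="create")  # 412 from GCS
except Exception as err:
    print("already exists, not overwritten:", err)
```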
@@ -2017,12 +2046,14 @@ async def simple_upload(
     consistency=None,
     content_type="application/octet-stream",
     fixed_key_metadata=None,
+    mode="overwrite",
 ):
     checker = get_consistency_checker(consistency)
     path = f"{fs._location}/upload/storage/v1/b/{quote(bucket)}/o"
     metadata = {"name": key}
     if metadatain is not None:
         metadata["metadata"] = metadatain
+    kw = {"ifGenerationMatch": "0"} if mode == "create" else {}
     metadata.update(_convert_fixed_key_metadata(fixed_key_metadata))
     metadata = json.dumps(metadata)
     template = (
@@ -2039,6 +2070,7 @@ async def simple_upload(
         headers={"Content-Type": 'multipart/related; boundary="==0=="'},
         data=UnclosableBytesIO(data),
         json_out=True,
+        **kw,
     )
     checker.update(datain)
     checker.validate_json_response(j)