diff --git a/cmd/copy.go b/cmd/copy.go index 13af5f81f..eb24dc9d7 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -234,7 +234,7 @@ func (raw rawCopyCmdArgs) stripTrailingWildcardOnRemoteSource(location common.Lo gURLParts := common.NewGenericResourceURLParts(*resourceURL, location) if err != nil { - err = fmt.Errorf("failed to parse url %s; %s", result, err) + err = fmt.Errorf("failed to parse url %s; %w", result, err) return } diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go index 72cfb392e..716577657 100755 --- a/cmd/copyEnumeratorInit.go +++ b/cmd/copyEnumeratorInit.go @@ -168,7 +168,7 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde containers, err := acctTraverser.listContainers() if err != nil { - return nil, fmt.Errorf("failed to list containers: %s", err) + return nil, fmt.Errorf("failed to list containers: %w", err) } // Resolve all container names up front. diff --git a/cmd/jobsResume.go b/cmd/jobsResume.go index 5c7629b96..5dd443b98 100644 --- a/cmd/jobsResume.go +++ b/cmd/jobsResume.go @@ -307,7 +307,7 @@ func (rca resumeCmdArgs) getSourceAndDestinationServiceClients( jobID, err := common.ParseJobID(rca.jobID) if err != nil { // Error for invalid JobId format - return nil, nil, fmt.Errorf("error parsing the jobId %s. Failed with error %s", rca.jobID, err.Error()) + return nil, nil, fmt.Errorf("error parsing the jobId %s. Failed with error %w", rca.jobID, err) } // But we don't want to supply a reauth token if we're not using OAuth. That could cause problems if say, a SAS is invalid. @@ -358,7 +358,7 @@ func (rca resumeCmdArgs) process() error { jobID, err := common.ParseJobID(rca.jobID) if err != nil { // If parsing gives an error, hence it is not a valid JobId format - return fmt.Errorf("error parsing the jobId %s. Failed with error %s", rca.jobID, err.Error()) + return fmt.Errorf("error parsing the jobId %s. Failed with error %w", rca.jobID, err) } // if no logging, set this empty so that we don't display the log location diff --git a/cmd/sync.go b/cmd/sync.go index a910f76c2..a95e3e94c 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -71,6 +71,7 @@ type rawSyncCmdArgs struct { backupMode bool putMd5 bool md5ValidationOption string + includeRoot bool // this flag indicates the user agreement with respect to deleting the extra files at the destination // which do not exists at source. With this flag turned on/off, users will not be asked for permission. // otherwise the user is prompted to make a decision @@ -385,6 +386,7 @@ func (raw *rawSyncCmdArgs) cook() (cookedSyncCmdArgs, error) { cooked.deleteDestinationFileIfNecessary = raw.deleteDestinationFileIfNecessary cooked.includeDirectoryStubs = raw.includeDirectoryStubs + cooked.includeRoot = raw.includeRoot return cooked, nil } @@ -436,6 +438,7 @@ type cookedSyncCmdArgs struct { forceIfReadOnly bool backupMode bool includeDirectoryStubs bool + includeRoot bool // commandString hold the user given command which is logged to the Job log file commandString string @@ -870,6 +873,7 @@ func init() { syncCmd.PersistentFlags().StringVar(&raw.trailingDot, "trailing-dot", "", "'Enable' by default to treat file share related operations in a safe manner. Available options: "+strings.Join(common.ValidTrailingDotOptions(), ", ")+". "+ "Choose 'Disable' to go back to legacy (potentially unsafe) treatment of trailing dot files where the file service will trim any trailing dots in paths. 
This can result in potential data corruption if the transfer contains two paths that differ only by a trailing dot (ex: mypath and mypath.). If this flag is set to 'Disable' and AzCopy encounters a trailing dot file, it will warn customers in the scanning log but will not attempt to abort the operation."+ "If the destination does not support trailing dot files (Windows or Blob Storage), AzCopy will fail if the trailing dot file is the root of the transfer and skip any trailing dot paths encountered during enumeration.") + syncCmd.PersistentFlags().BoolVar(&raw.includeRoot, "include-root", false, "Disabled by default. Enable to include the root directory's properties when persisting properties such as SMB or HNS ACLs") syncCmd.PersistentFlags().StringVar(&raw.compareHash, "compare-hash", "None", "Inform sync to rely on hashes as an alternative to LMT. Missing hashes at a remote source will throw an error. (None, MD5) Default: None") syncCmd.PersistentFlags().StringVar(&common.LocalHashDir, "hash-meta-dir", "", "When using `--local-hash-storage-mode=HiddenFiles` you can specify an alternate directory to store hash metadata files in (as opposed to next to the related files in the source)") diff --git a/cmd/syncEnumerator.go b/cmd/syncEnumerator.go index a02bf962e..c758504cb 100644 --- a/cmd/syncEnumerator.go +++ b/cmd/syncEnumerator.go @@ -24,6 +24,10 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" "runtime" "strings" "sync/atomic" @@ -93,11 +97,51 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s // verify that the traversers are targeting the same type of resources sourceIsDir, _ := sourceTraverser.IsDirectory(true) - destIsDir, _ := destinationTraverser.IsDirectory(true) - if sourceIsDir != destIsDir { - return nil, errors.New("trying to sync between different resource types (either file <-> directory or directory <-> file) which is not allowed." + - "sync must happen between source and destination of the same type, e.g. either file <-> file or directory <-> directory." + - "To make sure target is handled as a directory, add a trailing '/' to the target.") + destIsDir, err := destinationTraverser.IsDirectory(true) + + var resourceMismatchError = errors.New("trying to sync between different resource types (either file <-> directory or directory <-> file) which is not allowed." + + "sync must happen between source and destination of the same type, e.g. either file <-> file or directory <-> directory." + + "To make sure target is handled as a directory, add a trailing '/' to the target.") + + if cca.fromTo.To() == common.ELocation.Blob() || cca.fromTo.To() == common.ELocation.BlobFS() { + + /* + This is an "opinionated" choice. Blob has no formal understanding of directories. As such, we don't care about if it's a directory. + + If they sync a lone blob, they sync a lone blob. + If it lands on a directory stub, FNS is OK with this, but HNS isn't. It'll fail in that case. This is still semantically valid in FNS. + If they sync a prefix of blobs, they sync a prefix of blobs. This will always succeed, and won't break any semantics about FNS. + + So my (Adele's) opinion moving forward is: + - Hierarchies don't exist in flat namespaces. + - Instead, there are objects and prefixes. + - Stubs exist to clarify prefixes. 
+ - Stubs do not exist to enforce naming conventions. + - We are a tool, tools can be misused. It is up to the customer to validate everything they intend to do. + */ + + if bloberror.HasCode(err, bloberror.ContainerNotFound) { // We can resolve a missing container. Let's create it. + bt := destinationTraverser.(*blobTraverser) + sc := bt.serviceClient // it being a blob traverser is a relatively safe assumption, because + bUrlParts, _ := blob.ParseURL(bt.rawURL) // it should totally have succeeded by now anyway + _, err = sc.NewContainerClient(bUrlParts.ContainerName).Create(ctx, nil) // If it doesn't work out, this will surely bubble up later anyway. It won't be long. + if err != nil { + glcm.Warn(fmt.Sprintf("Failed to create the missing destination container: %v", err)) + } + // At this point, we'll let the destination be written to with the original resource type. + } + } else if err != nil && fileerror.HasCode(err, fileerror.ShareNotFound) { // We can resolve a missing share. Let's create it. + ft := destinationTraverser.(*fileTraverser) + sc := ft.serviceClient + fUrlParts, _ := file.ParseURL(ft.rawURL) // this should have succeeded by now. + _, err = sc.NewShareClient(fUrlParts.ShareName).Create(ctx, nil) // If it doesn't work out, this will surely bubble up later anyway. It won't be long. + if err != nil { + glcm.Warn(fmt.Sprintf("Failed to create the missing destination share: %v", err)) + } + // At this point, we'll let the destination be written to with the original resource type, as it will get created in this transfer. + } else if err == nil && sourceIsDir != destIsDir { + // If the destination exists and isn't Blob, though, we have to match resource types. + return nil, resourceMismatchError } // set up the filters in the right order @@ -129,7 +173,8 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s } // decide our folder transfer strategy - fpo, folderMessage := NewFolderPropertyOption(cca.fromTo, cca.recursive, true, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), false, strings.EqualFold(cca.destination.Value, common.Dev_Null), cca.includeDirectoryStubs) // sync always acts like stripTopDir=true + // sync always acts like stripTopDir=true, but if we intend to persist the root, we must tell NewFolderPropertyOption stripTopDir=false.
+ fpo, folderMessage := NewFolderPropertyOption(cca.fromTo, cca.recursive, !cca.includeRoot, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), false, strings.EqualFold(cca.destination.Value, common.Dev_Null), cca.includeDirectoryStubs) if !cca.dryrunMode { glcm.Info(folderMessage) } @@ -328,6 +373,7 @@ func IsDestinationCaseInsensitive(fromTo common.FromTo) bool { } else { return false } + } func quitIfInSync(transferJobInitiated, anyDestinationFileDeleted bool, cca *cookedSyncCmdArgs) { diff --git a/cmd/zc_enumerator.go b/cmd/zc_enumerator.go index a1fa87d59..e565c74fc 100644 --- a/cmd/zc_enumerator.go +++ b/cmd/zc_enumerator.go @@ -24,7 +24,11 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -291,7 +295,7 @@ func newStoredObject(morpher objectMorpher, name string, relativePath string, en // pass each StoredObject to the given objectProcessor if it passes all the filters type ResourceTraverser interface { Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) error - IsDirectory(isSource bool) (bool, error) + IsDirectory(isSource bool) (isDirectory bool, err error) // isDirectory has an isSource flag for a single exception to blob. // Blob should ONLY check remote if it's a source. // On destinations, because blobs and virtual directories can share names, we should support placing in both ways. @@ -739,10 +743,23 @@ func newSyncEnumerator(primaryTraverser, secondaryTraverser ResourceTraverser, i } func (e *syncEnumerator) enumerate() (err error) { + handleAcceptableErrors := func() { + switch { + case err == nil: // don't do any error checking + case fileerror.HasCode(err, fileerror.ResourceNotFound), + datalakeerror.HasCode(err, datalakeerror.ResourceNotFound), + bloberror.HasCode(err, bloberror.BlobNotFound), + strings.Contains(err.Error(), "The system cannot find the"), + errors.Is(err, os.ErrNotExist): + err = nil // Oh no! Oh well. We'll create it later. + } + } + // enumerate the primary resource and build lookup map err = e.primaryTraverser.Traverse(noPreProccessor, e.objectIndexer.store, e.filters) + handleAcceptableErrors() if err != nil { - return + return err } // enumerate the secondary resource and as the objects pass the filters @@ -750,6 +767,7 @@ func (e *syncEnumerator) enumerate() (err error) { // which can process given objects based on what's already indexed // note: transferring can start while scanning is ongoing err = e.secondaryTraverser.Traverse(noPreProccessor, e.objectComparator, e.filters) + handleAcceptableErrors() if err != nil { return } diff --git a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index 9a1fd9689..0ee4bdcc2 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -71,14 +71,34 @@ type blobTraverser struct { isDFS bool } -func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { +var NonErrorDirectoryStubOverlappable = errors.New("The directory stub exists, and can overlap.") + +func (t *blobTraverser) IsDirectory(isSource bool) (isDirectory bool, err error) { isDirDirect := copyHandlerUtil{}.urlIsContainerOrVirtualDirectory(t.rawURL) + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return false, err + } + // Skip the single blob check if we're checking a destination. 
// This is an individual exception for blob because blob supports virtual directories and blobs sharing the same name. // On HNS accounts, we would still perform this test. The user may have provided directory name without path-separator if isDirDirect { // a container or a path ending in '/' is always directory - return true, nil + if blobURLParts.ContainerName != "" && blobURLParts.BlobName == "" { + // If it's a container, let's ensure that container exists. Listing is a safe assumption to be valid, because how else would we enumerate? + containerClient := t.serviceClient.NewContainerClient(blobURLParts.ContainerName) + p := containerClient.NewListBlobsFlatPager(nil) + _, err = p.NextPage(t.ctx) + + if bloberror.HasCode(err, bloberror.AuthorizationPermissionMismatch) { + // Maybe we don't have the ability to list? Can we get container properties as a fallback? + _, propErr := containerClient.GetProperties(t.ctx, nil) + err = common.Iff(propErr == nil, nil, err) + } + } + + return true, err } if !isSource && !t.isDFS { // destination on blob endpoint. If it does not end in '/' it is a file @@ -98,10 +118,6 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { return isDirStub, nil } - blobURLParts, err := blob.ParseURL(t.rawURL) - if err != nil { - return false, err - } containerClient := t.serviceClient.NewContainerClient(blobURLParts.ContainerName) searchPrefix := strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) + common.AZCOPY_PATH_SEPARATOR_STRING maxResults := int32(1) @@ -116,12 +132,8 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { } if len(resp.Segment.BlobItems) == 0 { - // Not a directory - // If the blob is not found return the error to throw - if bloberror.HasCode(blobErr, bloberror.BlobNotFound) { - return false, errors.New(common.FILE_NOT_FOUND) - } - return false, blobErr + // Not a directory, but there was also no file on site. Therefore, there's nothing. + return false, errors.New(common.FILE_NOT_FOUND) } return true, nil diff --git a/cmd/zc_traverser_blob_account.go b/cmd/zc_traverser_blob_account.go index 458c7c009..8adc5497a 100644 --- a/cmd/zc_traverser_blob_account.go +++ b/cmd/zc_traverser_blob_account.go @@ -49,7 +49,7 @@ type blobAccountTraverser struct { excludeContainerName []ObjectFilter } -func (t *blobAccountTraverser) IsDirectory(_ bool) (bool, error) { +func (t *blobAccountTraverser) IsDirectory(isSource bool) (bool, error) { return true, nil // Returns true as account traversal is inherently folder-oriented and recursive. } diff --git a/cmd/zc_traverser_file.go b/cmd/zc_traverser_file.go index 9277b741f..1221e4896 100644 --- a/cmd/zc_traverser_file.go +++ b/cmd/zc_traverser_file.go @@ -81,7 +81,16 @@ func createFileClientFromServiceClient(fileURLParts file.URLParts, client *servi func (t *fileTraverser) IsDirectory(bool) (bool, error) { // Azure file share case if gCopyUtil.urlIsContainerOrVirtualDirectory(t.rawURL) { - return true, nil + // Let's at least test if it exists, that way we toss an error. 
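+ // Listing the share's root directory doubles as an existence (and permissions) probe; the answer remains "directory", but any error from the probe is surfaced to the caller.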
+ fileURLParts, err := file.ParseURL(t.rawURL) + if err != nil { + return true, err + } + directoryClient := t.serviceClient.NewShareClient(fileURLParts.ShareName) + p := directoryClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + _, err = p.NextPage(t.ctx) + + return true, err } // Need make request to ensure if it's directory @@ -95,7 +104,7 @@ func (t *fileTraverser) IsDirectory(bool) (bool, error) { if azcopyScanningLogger != nil { azcopyScanningLogger.Log(common.LogWarning, fmt.Sprintf("Failed to check if the destination is a folder or a file (Azure Files). Assuming the destination is a file: %s", err)) } - return false, nil + return false, err } return true, nil @@ -284,7 +293,7 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro for pager.More() { lResp, err := pager.NextPage(t.ctx) if err != nil { - return fmt.Errorf("cannot list files due to reason %s", err) + return fmt.Errorf("cannot list files due to reason %w", err) } for _, fileInfo := range lResp.Segment.Files { if invalidBlobOrWindowsName(*fileInfo.Name) { diff --git a/cmd/zc_traverser_file_account.go b/cmd/zc_traverser_file_account.go index 7ff7e9b4e..46c170c06 100644 --- a/cmd/zc_traverser_file_account.go +++ b/cmd/zc_traverser_file_account.go @@ -37,8 +37,8 @@ type fileAccountTraverser struct { // a generic function to notify that a new stored object has been enumerated incrementEnumerationCounter enumerationCounterFunc - trailingDot common.TrailingDotOption - destination *common.Location + trailingDot common.TrailingDotOption + destination *common.Location } func (t *fileAccountTraverser) IsDirectory(isSource bool) (bool, error) { @@ -107,13 +107,13 @@ func (t *fileAccountTraverser) Traverse(preprocessor objectMorpher, processor ob func newFileAccountTraverser(serviceClient *service.Client, shareName string, ctx context.Context, getProperties bool, incrementEnumerationCounter enumerationCounterFunc, trailingDot common.TrailingDotOption, destination *common.Location) (t *fileAccountTraverser) { t = &fileAccountTraverser{ - ctx: ctx, + ctx: ctx, incrementEnumerationCounter: incrementEnumerationCounter, - serviceClient: serviceClient, - sharePattern: shareName, - getProperties: getProperties, - trailingDot: trailingDot, - destination: destination, + serviceClient: serviceClient, + sharePattern: shareName, + getProperties: getProperties, + trailingDot: trailingDot, + destination: destination, } return } diff --git a/cmd/zc_traverser_gcp.go b/cmd/zc_traverser_gcp.go index 15a657b48..647a1374e 100644 --- a/cmd/zc_traverser_gcp.go +++ b/cmd/zc_traverser_gcp.go @@ -35,7 +35,7 @@ func (t *gcpTraverser) IsDirectory(isSource bool) (bool, error) { //Directories do not have attributes and hence throw error _, err := obj.Attrs(t.ctx) if err == gcpUtils.ErrObjectNotExist { - return true, nil + return true, err } return false, nil } diff --git a/cmd/zc_traverser_local.go b/cmd/zc_traverser_local.go index 87a9bf659..c559308b3 100755 --- a/cmd/zc_traverser_local.go +++ b/cmd/zc_traverser_local.go @@ -45,7 +45,7 @@ const MAX_SYMLINKS_TO_FOLLOW = 40 type localTraverser struct { fullPath string recursive bool - stripTopDir bool + stripTopDir bool symlinkHandling common.SymlinkHandlingType appCtx context.Context // a generic function to notify that a new stored object has been enumerated @@ -53,7 +53,7 @@ type localTraverser struct { errorChannel chan ErrorFileInfo targetHashType common.SyncHashType - hashAdapter common.HashDataAdapter + hashAdapter common.HashDataAdapter // receives 
fullPath entries and manages hashing of files lacking metadata. hashTargetChannel chan string } @@ -287,7 +287,7 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath result, err := UnfurlSymlinks(filePath) if err != nil { - err = fmt.Errorf("Failed to resolve symlink %s: %s", filePath, err.Error()) + err = fmt.Errorf("failed to resolve symlink %s: %w", filePath, err) WarnStdoutAndScanningLog(err.Error()) writeToErrorChannel(errorChannel, ErrorFileInfo{FilePath: filePath, FileInfo: fileInfo, ErrorMsg: err}) return nil @@ -295,7 +295,7 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath result, err = filepath.Abs(result) if err != nil { - err = fmt.Errorf("Failed to get absolute path of symlink result %s: %s", filePath, err.Error()) + err = fmt.Errorf("failed to get absolute path of symlink result %s: %w", filePath, err) WarnStdoutAndScanningLog(err.Error()) writeToErrorChannel(errorChannel, ErrorFileInfo{FilePath: filePath, FileInfo: fileInfo, ErrorMsg: err}) return nil @@ -303,7 +303,7 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath slPath, err := filepath.Abs(filePath) if err != nil { - err = fmt.Errorf("Failed to get absolute path of %s: %s", filePath, err.Error()) + err = fmt.Errorf("failed to get absolute path of %s: %w", filePath, err) WarnStdoutAndScanningLog(err.Error()) writeToErrorChannel(errorChannel, ErrorFileInfo{FilePath: filePath, FileInfo: fileInfo, ErrorMsg: err}) return nil @@ -311,7 +311,7 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath rStat, err := os.Stat(result) if err != nil { - err = fmt.Errorf("Failed to get properties of symlink target at %s: %s", result, err.Error()) + err = fmt.Errorf("failed to get properties of symlink target at %s: %w", result, err) WarnStdoutAndScanningLog(err.Error()) writeToErrorChannel(errorChannel, ErrorFileInfo{FilePath: filePath, FileInfo: fileInfo, ErrorMsg: err}) return nil @@ -354,7 +354,7 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath result, err := filepath.Abs(filePath) if err != nil { - err = fmt.Errorf("Failed to get absolute path of %s: %s", filePath, err.Error()) + err = fmt.Errorf("failed to get absolute path of %s: %w", filePath, err) WarnStdoutAndScanningLog(err.Error()) writeToErrorChannel(errorChannel, ErrorFileInfo{FilePath: filePath, FileInfo: fileInfo, ErrorMsg: err}) return nil @@ -513,7 +513,7 @@ func (t *localTraverser) prepareHashingThreads(preprocessor objectMorpher, proce fullPath := filepath.Join(t.fullPath, relPath) fi, err := os.Stat(fullPath) // query LMT & if it's a directory if err != nil { - err = fmt.Errorf("failed to get properties of file result %s: %s", relPath, err.Error()) + err = fmt.Errorf("failed to get properties of file result %s: %w", relPath, err) hashError <- err return } @@ -524,7 +524,7 @@ func (t *localTraverser) prepareHashingThreads(preprocessor objectMorpher, proce f, err := os.OpenFile(fullPath, os.O_RDONLY, 0644) // perm is not used here since it's RO if err != nil { - err = fmt.Errorf("failed to open file for reading result %s: %s", relPath, err.Error()) + err = fmt.Errorf("failed to open file for reading result %s: %w", relPath, err) hashError <- err return } @@ -538,7 +538,7 @@ func (t *localTraverser) prepareHashingThreads(preprocessor objectMorpher, proce // hash.Hash provides a writer type, allowing us to do a (small, 32MB to be precise) buffered write into the hasher and avoid memory concerns _, err = 
io.Copy(hasher, f) if err != nil { - err = fmt.Errorf("failed to read file into hasher result %s: %s", relPath, err.Error()) + err = fmt.Errorf("failed to read file into hasher result %s: %w", relPath, err) hashError <- err return } @@ -644,7 +644,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr // it fails here if file does not exist if err != nil { azcopyScanningLogger.Log(common.LogError, fmt.Sprintf("Failed to scan path %s: %s", t.fullPath, err.Error())) - return fmt.Errorf("failed to scan path %s due to %s", t.fullPath, err.Error()) + return fmt.Errorf("failed to scan path %s due to %w", t.fullPath, err) } finalizer, hashingProcessor := t.prepareHashingThreads(preprocessor, processor, filters) @@ -790,7 +790,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr preprocessor, entry.Name(), strings.ReplaceAll(relativePath, common.DeterminePathSeparator(t.fullPath), common.AZCOPY_PATH_SEPARATOR_STRING), // Consolidate relative paths to the azcopy path separator for sync - entityType, // TODO: add code path for folders + entityType, // TODO: add code path for folders fileInfo.ModTime(), fileInfo.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files @@ -829,8 +829,8 @@ func newLocalTraverser(ctx context.Context, fullPath string, recursive bool, str incrementEnumerationCounter: incrementEnumerationCounter, errorChannel: errorChannel, targetHashType: syncHashType, - hashAdapter: hashAdapter, - stripTopDir: stripTopDir, + hashAdapter: hashAdapter, + stripTopDir: stripTopDir, } return &traverser, nil } diff --git a/cmd/zc_traverser_s3.go b/cmd/zc_traverser_s3.go index 825ad4fcb..a35d778ba 100644 --- a/cmd/zc_traverser_s3.go +++ b/cmd/zc_traverser_s3.go @@ -57,7 +57,7 @@ func (t *s3Traverser) IsDirectory(isSource bool) (bool, error) { _, err := t.s3Client.StatObject(t.s3URLParts.BucketName, t.s3URLParts.ObjectKey, minio.StatObjectOptions{}) if err != nil { - return true, nil + return true, err } return false, nil diff --git a/cmd/zt_sync_blob_blob_test.go b/cmd/zt_sync_blob_blob_test.go index 418bf2524..70ea14d8f 100644 --- a/cmd/zt_sync_blob_blob_test.go +++ b/cmd/zt_sync_blob_blob_test.go @@ -457,23 +457,23 @@ func TestSyncS2SMismatchContainerAndBlob(t *testing.T) { dstBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, dstContainerName, singleBlobName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstBlobURLWithSAS.String()) - // type mismatch, we should get an error + // type mismatch, we should not get an error runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) + a.Nil(err) // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) + a.Equal(len(mockedRPC.transfers), len(blobList)) }) // reverse the source and destination raw = getDefaultSyncRawInput(dstBlobURLWithSAS.String(), srcContainerURLWithSAS.String()) - // type mismatch again, we should also get an error + // type mismatch again, we should also not get an error runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) + a.Nil(err) // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) + a.Equal(len(mockedRPC.transfers), len(blobList)) }) } diff --git a/cmd/zt_sync_blob_local_test.go b/cmd/zt_sync_blob_local_test.go index 54e7ee1ae..470ef5214 100644 --- a/cmd/zt_sync_blob_local_test.go +++ b/cmd/zt_sync_blob_local_test.go @@ -433,103 +433,10 @@ func TestSyncDownloadWithMissingDestination(t *testing.T) 
{ runSyncAndVerify(a, raw, func(err error) { // error should not be nil, but the app should not crash either - a.NotNil(err) - - // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) - }) -} - -// there is a type mismatch between the source and destination -func TestSyncMismatchContainerAndFile(t *testing.T) { - a := assert.New(t) - bsc := getBlobServiceClient() - - // set up the container with numerous blobs - cc, containerName := createNewContainer(a, bsc) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") - defer deleteContainer(a, cc) - a.NotNil(cc) - a.NotZero(len(blobList)) - - // set up the destination as a single file - dstDirName := scenarioHelper{}.generateLocalDirectory(a) - defer os.RemoveAll(dstDirName) - dstFileName := blobList[0] - scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) - raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), filepath.Join(dstDirName, dstFileName)) - - // type mismatch, we should get an error - runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) - - // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) - }) - - // reverse the source and destination - raw = getDefaultSyncRawInput(filepath.Join(dstDirName, dstFileName), rawContainerURLWithSAS.String()) - - // type mismatch, we should get an error - runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) - - // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) - }) -} - -// there is a type mismatch between the source and destination -func TestSyncMismatchBlobAndDirectory(t *testing.T) { - a := assert.New(t) - bsc := getBlobServiceClient() - - // set up the container with a single blob - blobName := "singleblobisbest" - blobList := []string{blobName} - cc, containerName := createNewContainer(a, bsc) - scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) - defer deleteContainer(a, cc) - a.NotNil(cc) - - // set up the destination as a directory - dstDirName := scenarioHelper{}.generateLocalDirectory(a) - defer os.RemoveAll(dstDirName) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) - raw := getDefaultSyncRawInput(rawBlobURLWithSAS.String(), dstDirName) - - // type mismatch, we should get an error - runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) - - // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) - }) - - // reverse the source and destination - raw = getDefaultSyncRawInput(dstDirName, rawBlobURLWithSAS.String()) - - // type mismatch, we should get an error - runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) + a.Nil(err) // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) + a.Equal(len(mockedRPC.transfers), len(blobList), "Expected to transfer the container's worth of blobs") }) } diff --git a/cmd/zt_sync_comparator_test.go b/cmd/zt_sync_comparator_test.go index c84cfcca5..ad7413371 100644 --- a/cmd/zt_sync_comparator_test.go +++ 
b/cmd/zt_sync_comparator_test.go @@ -384,53 +384,6 @@ func TestFileSyncS2SWithIncludeAndExcludeFlag(t *testing.T) { // }) // } -// there is a type mismatch between the source and destination -func TestFileSyncS2SMismatchShareAndFile(t *testing.T) { - a := assert.New(t) - fsc := getFileServiceClient() - srcShareClient, srcShareName := createNewShare(a, fsc) - dstShareClient, dstShareName := createNewShare(a, fsc) - defer deleteShare(a, srcShareClient) - defer deleteShare(a, dstShareClient) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") - a.NotZero(len(fileList)) - - // set up the destination share with a single file - singleFileName := "single" - scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, []string{singleFileName}) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) - dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, singleFileName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstFileURLWithSAS.String()) - - // type mismatch, we should get an error - runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) - - // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) - }) - - // reverse the source and destination - raw = getDefaultSyncRawInput(dstFileURLWithSAS.String(), srcShareURLWithSAS.String()) - - // type mismatch again, we should also get an error - runSyncAndVerify(a, raw, func(err error) { - a.NotNil(err) - - // validate that the right number of transfers were scheduled - a.Zero(len(mockedRPC.transfers)) - }) -} - // share <-> dir sync func TestFileSyncS2SShareAndEmptyDir(t *testing.T) { a := assert.New(t) diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index 794aa50ab..9b7b881df 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -267,7 +267,7 @@ func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint s func (uotm *UserOAuthTokenManager) getCachedTokenInfo(ctx context.Context) (*OAuthTokenInfo, error) { hasToken, err := uotm.credCache.HasCachedToken() if err != nil { - return nil, fmt.Errorf("no cached token found, please log in with azcopy's login command, %v", err) + return nil, fmt.Errorf("no cached token found, please log in with azcopy's login command, %w", err) } if !hasToken { return nil, errors.New("no cached token found, please log in with azcopy's login command") @@ -275,12 +275,12 @@ func (uotm *UserOAuthTokenManager) getCachedTokenInfo(ctx context.Context) (*OAu tokenInfo, err := uotm.credCache.LoadToken() if err != nil { - return nil, fmt.Errorf("get cached token failed, %v", err) + return nil, fmt.Errorf("get cached token failed, %w", err) } freshToken, err := tokenInfo.Refresh(ctx) if err != nil { - return nil, fmt.Errorf("get cached token failed to ensure token fresh, please log in with azcopy's login command again, %v", err) + return nil, fmt.Errorf("get cached token failed to ensure token fresh, please log in with azcopy's login command again, %w", err) } // Update token cache, if token is updated. 
@@ -352,13 +352,13 @@ func (uotm *UserOAuthTokenManager) getTokenInfoFromEnvVar(ctx context.Context) ( tokenInfo, err := jsonToTokenInfo([]byte(rawToken)) if err != nil { - return nil, fmt.Errorf("get token from environment variable failed to unmarshal token, %v", err) + return nil, fmt.Errorf("get token from environment variable failed to unmarshal token, %w", err) } if tokenInfo.LoginType != EAutoLoginType.TokenStore() { refreshedToken, err := tokenInfo.Refresh(ctx) if err != nil { - return nil, fmt.Errorf("get token from environment variable failed to ensure token fresh, %v", err) + return nil, fmt.Errorf("get token from environment variable failed to ensure token fresh, %w", err) } tokenInfo.Token = *refreshedToken } @@ -541,12 +541,12 @@ func (tsc *TokenStoreCredential) GetToken(_ context.Context, _ policy.TokenReque defer tsc.lock.Unlock() hasToken, err := tokenStoreCredCache.HasCachedToken() if err != nil || !hasToken { - return azcore.AccessToken{}, fmt.Errorf("no cached token found in Token Store Mode(SE), %v", err) + return azcore.AccessToken{}, fmt.Errorf("no cached token found in Token Store Mode(SE), %w", err) } tokenInfo, err := tokenStoreCredCache.LoadToken() if err != nil { - return azcore.AccessToken{}, fmt.Errorf("get cached token failed in Token Store Mode(SE), %v", err) + return azcore.AccessToken{}, fmt.Errorf("get cached token failed in Token Store Mode(SE), %w", err) } tsc.token = &azcore.AccessToken{ diff --git a/e2etest/newe2e_generic_wrangling.go b/e2etest/newe2e_generic_wrangling.go index 9aaa362ab..87f61e5cc 100644 --- a/e2etest/newe2e_generic_wrangling.go +++ b/e2etest/newe2e_generic_wrangling.go @@ -160,3 +160,15 @@ func ClonePointer[T any](in *T) *T { return &out } + +func JoinMap[K comparable, V any](in ...map[K]V) map[K]V { + out := map[K]V{} + + for _, dict := range in { + for k, v := range dict { + out[k] = v + } + } + + return out +} diff --git a/e2etest/newe2e_resource_managers_file.go b/e2etest/newe2e_resource_managers_file.go index 8534e513b..2a0e7368d 100644 --- a/e2etest/newe2e_resource_managers_file.go +++ b/e2etest/newe2e_resource_managers_file.go @@ -517,6 +517,13 @@ func (f *FileObjectResourceManager) Create(a Asserter, body ObjectContentContain FilePermissions: perms, Metadata: props.Metadata, }) + // This is fine. Instead let's set properties. 
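+ // A ResourceAlreadyExists response on directory creation is treated as success: clear the error and just apply the requested properties to the existing directory.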
+ if fileerror.HasCode(err, fileerror.ResourceAlreadyExists) { + err = nil + + f.SetObjectProperties(a, props) + } + a.NoError("Create directory", err) default: a.Error("File Objects only support Files and Folders") diff --git a/e2etest/newe2e_resource_managers_local.go b/e2etest/newe2e_resource_managers_local.go index 901ca42d6..08f4d572a 100644 --- a/e2etest/newe2e_resource_managers_local.go +++ b/e2etest/newe2e_resource_managers_local.go @@ -274,15 +274,21 @@ func (l *LocalObjectResourceManager) Create(a Asserter, body ObjectContentContai a.AssertNow("Object must be file to have content", Equal{}) l.CreateParents(a) - f, err := os.OpenFile(l.getWorkingPath(), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0774) - a.NoError("Open file", err) - defer func(f *os.File) { - err := f.Close() - a.NoError("Close file", err) - }(f) - _, err = io.Copy(f, body.Reader()) - a.NoError("Write file", err) + if l.entityType == common.EEntityType.File() { + f, err := os.OpenFile(l.getWorkingPath(), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0774) + a.NoError("Open file", err) + defer func(f *os.File) { + err := f.Close() + a.NoError("Close file", err) + }(f) + + _, err = io.Copy(f, body.Reader()) + a.NoError("Write file", err) + } else if l.entityType == common.EEntityType.Folder() { + err := os.Mkdir(l.getWorkingPath(), 0775) + a.NoError("Mkdir", err) + } l.SetObjectProperties(a, properties) diff --git a/e2etest/newe2e_task_runazcopy_parameters.go b/e2etest/newe2e_task_runazcopy_parameters.go index 633d4fa23..db525dd40 100644 --- a/e2etest/newe2e_task_runazcopy_parameters.go +++ b/e2etest/newe2e_task_runazcopy_parameters.go @@ -383,6 +383,7 @@ type SyncFlags struct { LocalHashStorageMode *common.HashStorageMode `flag:"local-hash-storage-mode"` // The real flag name is not all that great due to `delete-destination`, but, it works. DeleteIfNecessary *bool `flag:"delete-destination-file"` + IncludeRoot *bool `flag:"include-root"` } // RemoveFlags is not tiered like CopySyncCommonFlags is, because it is dissimilar in functionality, and would be hard to test in the same scenario. diff --git a/e2etest/zt_newe2e_fns_dir_test.go b/e2etest/zt_newe2e_fns_dir_test.go index 6515e7a2f..567ab2c55 100644 --- a/e2etest/zt_newe2e_fns_dir_test.go +++ b/e2etest/zt_newe2e_fns_dir_test.go @@ -202,3 +202,72 @@ func (*FNSSuite) Scenario_SyncTrailingSlashDeletion(a *ScenarioVariationManager) }, }, false) } + +func (*FNSSuite) Scenario_SyncOverlap(a *ScenarioVariationManager) { + // Sync must be capable of mirroring a source resource type onto a blob destination. + + // Define our scenario up front; reduce complexity in debugging. 
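+ // Variations: each source location in the list below syncs a lone file "foo" onto a Blob destination that already holds either a standard directory stub ("foo") or a trailing-slash stub ("foo/").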
+ srcLoc := ResolveVariation(a, []common.Location{common.ELocation.File(), common.ELocation.Blob(), common.ELocation.BlobFS(), common.ELocation.Local()}) + dstLoc := common.ELocation.Blob() + a.InsertVariationSeparator("->Blob|Overwrite:") + + const ( + DestTypeStandardStub = iota + DestTypeOverlapStub + ) + + dstType := NamedResolveVariation(a, map[string]uint{ + "StandardStub": DestTypeStandardStub, + "TrailingSlashStub": DestTypeOverlapStub, + }) + + // Set up the resource maps + srcMap := ObjectResourceMappingFlat{ + "foo": ResourceDefinitionObject{}, + } + + dstMap := map[uint]ObjectResourceMappingFlat{ + DestTypeStandardStub: { + "foo": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + }, + }, + }, + DestTypeOverlapStub: { + "foo/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + }, + }, + }, + }[dstType] + + // Spin up the resources + src := CreateResource(a, GetRootResource(a, srcLoc), ResourceDefinitionContainer{ + Objects: srcMap, + }) + + dst := CreateResource(a, GetRootResource(a, dstLoc), ResourceDefinitionContainer{ + Objects: dstMap, + }) + + // Execute the test + RunAzCopy(a, AzCopyCommand{ + Verb: AzCopyVerbSync, + Targets: []ResourceManager{ + src.GetObject(a, "foo", common.EEntityType.File()), + dst.GetObject(a, "foo", common.EEntityType.File()), + }, + Flags: SyncFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + Recursive: pointerTo(true), + IncludeDirectoryStubs: pointerTo(true), + }, + }, + }) + + ValidateResource(a, dst, ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat(JoinMap(dstMap, srcMap)), + }, false) +} diff --git a/e2etest/zt_newe2e_sync_test.go b/e2etest/zt_newe2e_sync_test.go index 5db3b004c..c16f99741 100644 --- a/e2etest/zt_newe2e_sync_test.go +++ b/e2etest/zt_newe2e_sync_test.go @@ -4,7 +4,9 @@ import ( "bytes" "encoding/base64" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/google/uuid" "io/fs" "os" "path/filepath" @@ -155,8 +157,11 @@ func (s *SyncTestSuite) Scenario_TestSyncRemoveDestination(svm *ScenarioVariatio }) RunAzCopy(svm, AzCopyCommand{ - Verb: AzCopyVerbSync, - Targets: []ResourceManager{srcRes, dstRes}, + Verb: AzCopyVerbSync, + Targets: []ResourceManager{ + srcRes, + dstRes, + }, Flags: SyncFlags{ CopySyncCommonFlags: CopySyncCommonFlags{ Recursive: pointerTo(true), @@ -437,3 +442,105 @@ func (s *SyncTestSuite) Scenario_TestSyncHashTypeDestinationHash(svm *ScenarioVa }, }) } + +func (s *SyncTestSuite) Scenario_TestSyncCreateResources(a *ScenarioVariationManager) { + // Set up the scenario + a.InsertVariationSeparator("Blob->") + srcLoc := common.ELocation.Blob() + dstLoc := ResolveVariation(a, []common.Location{common.ELocation.Local(), common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS()}) + a.InsertVariationSeparator("|Create:") + + const ( + CreateContainer = "Container" + CreateFolder = "Folder" + CreateObject = "Object" + ) + + resourceType := ResolveVariation(a, []string{CreateContainer, CreateFolder, CreateObject}) + + // Select source map + srcMap := map[string]ObjectResourceMappingFlat{ + CreateContainer: { + "foo": ResourceDefinitionObject{}, + }, + CreateFolder: { + "foo": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + }, + }, + "foo/bar": ResourceDefinitionObject{},
+ }, + CreateObject: { + "foo": ResourceDefinitionObject{}, + }, + }[resourceType] + + // Create resources and targets + src := CreateResource(a, GetRootResource(a, srcLoc), ResourceDefinitionContainer{ + Objects: srcMap, + }) + srcTarget := map[string]ResourceManager{ + CreateContainer: src, + CreateFolder: src.GetObject(a, "foo", common.EEntityType.Folder()), + CreateObject: src.GetObject(a, "foo", common.EEntityType.File()), + }[resourceType] + + var dstTarget ResourceManager + var dst ContainerResourceManager + if dstLoc == common.ELocation.Local() { + dst = GetRootResource(a, dstLoc).(ContainerResourceManager) // No need to grab a container + } else { + dst = GetRootResource(a, dstLoc).(ServiceResourceManager).GetContainer(uuid.NewString()) + } + + if resourceType != CreateContainer { + dst.Create(a, ContainerProperties{}) + } + + dstTarget = map[string]ResourceManager{ + CreateContainer: dst, + CreateFolder: dst.GetObject(a, "foo", common.EEntityType.File()), // Intentionally don't end with a trailing slash, so Sync has to pick that up for us. + CreateObject: dst.GetObject(a, "foo", common.EEntityType.File()), + }[resourceType] + + // Run the test for realsies. + RunAzCopy(a, AzCopyCommand{ + Verb: AzCopyVerbSync, + Targets: []ResourceManager{ + srcTarget, + AzCopyTarget{ + ResourceManager: dstTarget, + AuthType: EExplicitCredentialType.SASToken(), + Opts: CreateAzCopyTargetOptions{ + SASTokenOptions: GenericAccountSignatureValues{ + Permissions: (&blobsas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Add: true, + Create: true, + }).String(), + ResourceTypes: (&blobsas.AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }).String(), + }, + }, + }, + }, + Flags: SyncFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + Recursive: pointerTo(true), + IncludeDirectoryStubs: pointerTo(true), + }, + IncludeRoot: pointerTo(true), + }, + }) + + ValidateResource(a, dst, ResourceDefinitionContainer{ + Objects: srcMap, + }, false) +} diff --git a/ste/JobPartPlanFileName.go b/ste/JobPartPlanFileName.go index 18d8a0e41..4a6e3705a 100644 --- a/ste/JobPartPlanFileName.go +++ b/ste/JobPartPlanFileName.go @@ -46,8 +46,8 @@ func (jpfn JobPartPlanFileName) Parse() (jobID common.JobID, partNumber common.P jpfnSplit := strings.Split(string(jpfn), "--") jobId, err := common.ParseJobID(jpfnSplit[0]) if err != nil { - err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) //nolint:staticcheck - // TODO: return here on error? or ignore + err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %w", string(jpfn), err) //nolint:staticcheck + common.GetLifecycleMgr().Warn(err.Error()) } jobID = jobId n, err := fmt.Sscanf(jpfnSplit[1], "%05d.steV%d", &partNumber, &dataSchemaVersion) @@ -144,7 +144,7 @@ func (jpfn JobPartPlanFileName) Create(order common.CopyJobPartOrderRequest) { // planPathname := planDir + "/" + string(jpfn) file, err := os.Create(jpfn.GetJobPartPlanPath()) if err != nil { - panic(fmt.Errorf("couldn't create job part plan file %q: %v", jpfn, err)) + panic(fmt.Errorf("couldn't create job part plan file %q: %w", jpfn, err)) } defer file.Close()