Skip to content

Commit

Permalink
Merge pull request #1771 from Azure/dev
Browse files Browse the repository at this point in the history
10.15.0 Release
  • Loading branch information
zezha-msft authored May 10, 2022
2 parents 87e0ff7 + 9a3336c commit fa61205
Show file tree
Hide file tree
Showing 116 changed files with 2,662 additions and 2,507 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -323,7 +323,7 @@ testSuite/testSuite.exe
*.exe

#Python Dependencies
testSuite/test_suite_config.ini
test_suite_config.ini
testSuite/venv/*
testSuite/venv1/*
testSuite/scripts/__pycache__/utility.cpython-36.pyc
Expand Down
16 changes: 16 additions & 0 deletions ChangeLog.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,22 @@

# Change Log

## Version 10.15.0

### New features

1. Added support for OAuth forwarding when performing Blob -> Blob copy.
2. Allow users to dynamically change the bandwidth cap via messages through the STDIN.
3. GCS -> Blob is now GA.
4. Enable MinIO(S3) logs in DEBUG mode.
5. Upgraded Go version to 1.17.9.

### Bug fixes
1. Resolved alignment of atomicSuccessfulBytesInActiveFiles.
2. Fixed issue where last-write-time was still getting persisted even when --preserve-smb-info is false.
3. Fixed issue where concurrency was always AUTO for Azure Files despite explicit override.
4. Removed outdated load command following the deprecation of the cflsload package.

## Version 10.14.1

### Bug fixes
Expand Down
2 changes: 1 addition & 1 deletion azbfs/zc_filesystemRequestOptions.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@ type ListPathsFilesystemOptions struct {
MaxResults *int32
// The continuation token to resume listing.
ContinuationToken *string
}
}
26 changes: 16 additions & 10 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ jobs:
env:
GO111MODULE: 'on'
inputs:
version: '1.16.14'
version: '1.17.9'

- script: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.43.0
Expand Down Expand Up @@ -94,19 +94,22 @@ jobs:
steps:
- task: GoTool@0
inputs:
version: '1.16.14'
version: '1.17.9'

# Running E2E Tests on Linux - AMD64
- script: |
set -e
GOARCH=amd64 GOOS=linux go build -o azcopy_linux_amd64
export AZCOPY_E2E_EXECUTABLE_PATH=$(pwd)/azcopy_linux_amd64
go test -timeout 20m -race -short -cover ./e2etest
go test -timeout 60m -race -short -cover ./e2etest
env:
AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY)
AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME)
AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS)
AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS)
AZCOPY_E2E_TENANT_ID: $(OAUTH_TENANT_ID)
AZCOPY_E2E_APPLICATION_ID: $(ACTIVE_DIRECTORY_APPLICATION_ID)
AZCOPY_E2E_CLIENT_SECRET: $(AZCOPY_SPA_CLIENT_SECRET)
AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME)
AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY)
CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY)
Expand All @@ -119,12 +122,15 @@ jobs:
go build -o $(System.DefaultWorkingDirectory)/azcopy_windows_amd64.exe
go build -o $(System.DefaultWorkingDirectory)/azcopy_windows_386.exe
echo 'starting E2E tests on windows'
go test -timeout 30m -race -cover -v ./e2etest
go test -timeout 60m -race -cover -v ./e2etest
env:
AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY)
AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME)
AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS)
AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS)
AZCOPY_E2E_TENANT_ID: $(OAUTH_TENANT_ID)
AZCOPY_E2E_APPLICATION_ID: $(ACTIVE_DIRECTORY_APPLICATION_ID)
AZCOPY_E2E_CLIENT_SECRET: $(AZCOPY_SPA_CLIENT_SECRET)
AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME)
AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY)
CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY)
Expand All @@ -139,12 +145,15 @@ jobs:
go build -o azcopy_darwin_amd64
echo 'starting E2E tests on mac-os'
export AZCOPY_E2E_EXECUTABLE_PATH=$(pwd)/azcopy_darwin_amd64
go test -timeout 30m -race -cover -v ./e2etest
go test -timeout 60m -race -cover -v ./e2etest
env:
AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY)
AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME)
AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS)
AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS)
AZCOPY_E2E_TENANT_ID: $(OAUTH_TENANT_ID)
AZCOPY_E2E_APPLICATION_ID: $(ACTIVE_DIRECTORY_APPLICATION_ID)
AZCOPY_E2E_CLIENT_SECRET: $(AZCOPY_SPA_CLIENT_SECRET)
AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME)
AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY)
CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY)
Expand All @@ -167,7 +176,7 @@ jobs:
- task: GoTool@0
name: 'Set_up_Golang'
inputs:
version: '1.16.14'
version: '1.17.9'
- task: DownloadSecureFile@1
name: ciGCSServiceAccountKey
displayName: 'Download GCS Service Account Key'
Expand All @@ -187,7 +196,7 @@ jobs:
# the set -e line is needed so that the unit tests failure would cause the job to fail properly
# "-check.v" (must be after package list) outputs timings
set -e
go test -timeout 45m -race -short -cover ./cmd ./common ./common/parallel ./ste ./azbfs ./sddl "-check.v"
go test -timeout 60m -race -short -cover ./cmd ./common ./common/parallel ./ste ./azbfs ./sddl "-check.v"
GOARCH=amd64 GOOS=linux go build -o azcopy_linux_amd64
name: 'Run_unit_tests'
env:
Expand All @@ -206,9 +215,6 @@ jobs:
export TEST_SUITE_EXECUTABLE_LOCATION=$(pwd)/test-validator
export TEST_DIRECTORY_PATH=$(pwd)/test-temp
# install the CLFSLoad extension
pip3 install clfsload
keyctl session test python ./testSuite/scripts/run.py
name: 'Run_smoke_tests'
env:
Expand Down
114 changes: 57 additions & 57 deletions cmd/copy.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/Azure/azure-storage-azcopy/v10/jobsAdmin"
"io"
"math"
"net/url"
Expand Down Expand Up @@ -66,7 +67,7 @@ type rawCopyCmdArgs struct {
src string
dst string
fromTo string
//blobUrlForRedirection string
// blobUrlForRedirection string

// new include/exclude only apply to file names
// implemented for remove (and sync) only
Expand Down Expand Up @@ -1312,9 +1313,9 @@ func (cca *CookedCopyCmdArgs) processRedirectionUpload(blobResource common.Resou
// dispatches the job order (in parts) to the storage engine
func (cca *CookedCopyCmdArgs) processCopyJobPartOrders() (err error) {
ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion)
// Make AUTO default for Azure Files since Azure Files throttles too easily.
if ste.JobsAdmin != nil && (cca.FromTo.From() == common.ELocation.File() || cca.FromTo.To() == common.ELocation.File()) {
ste.JobsAdmin.SetConcurrencySettingsToAuto()
// Make AUTO default for Azure Files since Azure Files throttles too easily unless user specified concurrency value
if jobsAdmin.JobsAdmin != nil && (cca.FromTo.From() == common.ELocation.File() || cca.FromTo.To() == common.ELocation.File()) && glcm.GetEnvironmentVariable(common.EEnvironmentVariable.ConcurrencyValue()) == "" {
jobsAdmin.JobsAdmin.SetConcurrencySettingsToAuto()
}

// Note: credential info here is only used by remove at the moment.
Expand Down Expand Up @@ -1556,6 +1557,55 @@ func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot
// if json is not desired, and job is done, then we generate a special end message to conclude the job
duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job

var computeThroughput = func() float64 {
// compute the average throughput for the last time interval
bytesInMb := float64(float64(summary.BytesOverWire-cca.intervalBytesTransferred) / float64(base10Mega))
timeElapsed := time.Since(cca.intervalStartTime).Seconds()

// reset the interval timer and byte count
cca.intervalStartTime = time.Now()
cca.intervalBytesTransferred = summary.BytesOverWire

return common.Iffloat64(timeElapsed != 0, bytesInMb/timeElapsed, 0) * 8
}
glcm.Progress(func(format common.OutputFormat) string {
if format == common.EOutputFormat.Json() {
jsonOutput, err := json.Marshal(summary)
common.PanicIfErr(err)
return string(jsonOutput)
} else {
// abbreviated output for cleanup jobs
if cca.isCleanupJob {
return cleanupStatusString
}

// if json is not needed, then we generate a message that goes nicely on the same line
// display a scanning keyword if the job is not completely ordered
var scanningString = " (scanning...)"
if summary.CompleteJobOrdered {
scanningString = ""
}

throughput := computeThroughput()
throughputString := fmt.Sprintf("2-sec Throughput (Mb/s): %v", jobsAdmin.ToFixed(throughput, 4))
if throughput == 0 {
// As there would be case when no bits sent from local, e.g. service side copy, when throughput = 0, hide it.
throughputString = ""
}

// indicate whether constrained by disk or not
isBenchmark := cca.FromTo.From() == common.ELocation.Benchmark()
perfString, diskString := getPerfDisplayText(summary.PerfStrings, summary.PerfConstraint, duration, isBenchmark)

return fmt.Sprintf("%.1f %%, %v Done, %v Failed, %v Pending, %v Skipped, %v Total%s, %s%s%s",
summary.PercentComplete,
summary.TransfersCompleted,
summary.TransfersFailed,
summary.TotalTransfers-(summary.TransfersCompleted+summary.TransfersFailed+summary.TransfersSkipped),
summary.TransfersSkipped, summary.TotalTransfers, scanningString, perfString, throughputString, diskString)
}
})

if jobDone {
exitCode := cca.getSuccessExitCode()
if summary.TransfersFailed > 0 {
Expand Down Expand Up @@ -1585,7 +1635,7 @@ TotalBytesTransferred: %v
Final Job Status: %v%s%s
`,
summary.JobID.String(),
ste.ToFixed(duration.Minutes(), 4),
jobsAdmin.ToFixed(duration.Minutes(), 4),
summary.FileTransfers,
summary.FolderPropertyTransfers,
summary.TotalTransfers,
Expand All @@ -1603,7 +1653,7 @@ Final Job Status: %v%s%s
}

// log to job log
jobMan, exists := ste.JobsAdmin.JobMgr(summary.JobID)
jobMan, exists := jobsAdmin.JobsAdmin.JobMgr(summary.JobID)
if exists {
jobMan.Log(pipeline.LogInfo, logStats+"\n"+output)
}
Expand All @@ -1620,56 +1670,6 @@ Final Job Status: %v%s%s
}
}

var computeThroughput = func() float64 {
// compute the average throughput for the last time interval
bytesInMb := float64(float64(summary.BytesOverWire-cca.intervalBytesTransferred) / float64(base10Mega))
timeElapsed := time.Since(cca.intervalStartTime).Seconds()

// reset the interval timer and byte count
cca.intervalStartTime = time.Now()
cca.intervalBytesTransferred = summary.BytesOverWire

return common.Iffloat64(timeElapsed != 0, bytesInMb/timeElapsed, 0) * 8
}

glcm.Progress(func(format common.OutputFormat) string {
if format == common.EOutputFormat.Json() {
jsonOutput, err := json.Marshal(summary)
common.PanicIfErr(err)
return string(jsonOutput)
} else {
// abbreviated output for cleanup jobs
if cca.isCleanupJob {
return cleanupStatusString
}

// if json is not needed, then we generate a message that goes nicely on the same line
// display a scanning keyword if the job is not completely ordered
var scanningString = " (scanning...)"
if summary.CompleteJobOrdered {
scanningString = ""
}

throughput := computeThroughput()
throughputString := fmt.Sprintf("2-sec Throughput (Mb/s): %v", ste.ToFixed(throughput, 4))
if throughput == 0 {
// As there would be case when no bits sent from local, e.g. service side copy, when throughput = 0, hide it.
throughputString = ""
}

// indicate whether constrained by disk or not
isBenchmark := cca.FromTo.From() == common.ELocation.Benchmark()
perfString, diskString := getPerfDisplayText(summary.PerfStrings, summary.PerfConstraint, duration, isBenchmark)

return fmt.Sprintf("%.1f %%, %v Done, %v Failed, %v Pending, %v Skipped, %v Total%s, %s%s%s",
summary.PercentComplete,
summary.TransfersCompleted,
summary.TransfersFailed,
summary.TotalTransfers-(summary.TransfersCompleted+summary.TransfersFailed+summary.TransfersSkipped),
summary.TransfersSkipped, summary.TotalTransfers, scanningString, perfString, throughputString, diskString)
}
})

return
}

Expand Down Expand Up @@ -1763,7 +1763,7 @@ func init() {
cpCmd := &cobra.Command{
Use: "copy [source] [destination]",
Aliases: []string{"cp", "c"},
SuggestFor: []string{"cpy", "cy", "mv"}, //TODO why does message appear twice on the console
SuggestFor: []string{"cpy", "cy", "mv"}, // TODO why does message appear twice on the console
Short: copyCmdShortDescription,
Long: copyCmdLongDescription,
Example: copyCmdExample,
Expand Down
6 changes: 3 additions & 3 deletions cmd/copyEnumeratorHelper.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ package cmd
import (
"fmt"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-azcopy/v10/ste"
"github.com/Azure/azure-storage-azcopy/v10/jobsAdmin"
"math/rand"
"strings"

Expand Down Expand Up @@ -85,8 +85,8 @@ func dispatchFinalPart(e *common.CopyJobPartOrderRequest, cca *CookedCopyCmdArgs
return fmt.Errorf("copy job part order with JobId %s and part number %d failed because %s", e.JobID, e.PartNum, resp.ErrorMsg)
}

if ste.JobsAdmin != nil {
ste.JobsAdmin.LogToJobLog(FinalPartCreatedMessage, pipeline.LogInfo)
if jobsAdmin.JobsAdmin != nil {
jobsAdmin.JobsAdmin.LogToJobLog(FinalPartCreatedMessage, pipeline.LogInfo)
}

// set the flag on cca, to indicate the enumeration is done
Expand Down
Loading

0 comments on commit fa61205

Please sign in to comment.