Skip to content

Commit

Permalink
Merge pull request #1292 from buildpacks-community/0.9.x-ci-cherrypicks
Browse files Browse the repository at this point in the history
[0.9.x] more ci related backports
  • Loading branch information
chenbh authored Jul 26, 2023
2 parents bb11d8f + 7205ccc commit bd29085
Show file tree
Hide file tree
Showing 3 changed files with 47 additions and 19 deletions.
8 changes: 8 additions & 0 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -194,12 +194,20 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Docker Login
uses: docker/login-action@v2 # NOTE(review): version ref was mangled by email-obfuscation in the scrape — confirm the pinned version against the upstream workflow
with:
registry: ${{ secrets.REGISTRY_HOST }}
username: ${{ secrets.REGISTRY_USER }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
- name: Build
run: |
trap 'echo -e "$output"' EXIT
output=$(go run ./hack/lifecycle/main.go --tag=${{ env.PUBLIC_IMAGE_DEV_REPO }}/lifecycle 2>&1)
image=$(echo "$output" | grep "saved lifecycle" | awk -F "saved lifecycle image: " '{print $2}')
mkdir images
Expand Down
3 changes: 0 additions & 3 deletions test/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,6 @@ type config struct {
testRegistryUsername string
testRegistryPassword string
imageTag string

generatedImageNames []string
}

type dockerCredentials map[string]authn.AuthConfig
Expand Down Expand Up @@ -56,7 +54,6 @@ func loadConfig(t *testing.T) config {

// newImageTag returns a fresh, randomized tag derived from the configured
// base imageTag, so concurrent test runs do not collide on the same tag.
// NOTE(review): this commit removes the generatedImageNames bookkeeping
// (tags are now tracked per-test via the builtImages set), so the method
// no longer records the tag it hands out.
func (c *config) newImageTag() string {
	genTag := c.imageTag + "-" + strconv.Itoa(rand.Int())
	return genTag
}

Expand Down
55 changes: 39 additions & 16 deletions test/execute_build_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,13 +52,15 @@ func testCreateImage(t *testing.T, when spec.G, it spec.S) {
)

var (
cfg config
clients *clients
ctx = context.Background()
cfg config
clients *clients
ctx = context.Background()
builtImages map[string]struct{}
)

it.Before(func() {
cfg = loadConfig(t)
builtImages = map[string]struct{}{}

var err error
clients, err = newClients(t)
Expand Down Expand Up @@ -91,7 +93,7 @@ func testCreateImage(t *testing.T, when spec.G, it spec.S) {
})

it.After(func() {
for _, tag := range cfg.generatedImageNames {
for tag := range builtImages {
deleteImageTag(t, tag)
}
})
Expand Down Expand Up @@ -370,7 +372,7 @@ func testCreateImage(t *testing.T, when spec.G, it spec.S) {
}, metav1.CreateOptions{})
require.NoError(t, err)

validateImageCreate(t, clients, image, expectedResources)
builtImages[validateImageCreate(t, clients, image, expectedResources)] = struct{}{}
validateRebase(t, ctx, clients, image.Name, testNamespace)
})
}
Expand All @@ -386,7 +388,7 @@ func testCreateImage(t *testing.T, when spec.G, it spec.S) {
},
}

generateRebuild(&ctx, t, cfg, clients, volumeCacheConfig, testNamespace, clusterBuilderName, serviceAccountName)
builtImages[generateRebuild(&ctx, t, cfg, clients, volumeCacheConfig, testNamespace, clusterBuilderName, serviceAccountName)] = struct{}{}
})

it("can trigger rebuilds with registry cache", func() {
Expand All @@ -397,11 +399,11 @@ func testCreateImage(t *testing.T, when spec.G, it spec.S) {
Tag: cacheImageTag,
},
}
generateRebuild(&ctx, t, cfg, clients, registryCacheConfig, testNamespace, clusterBuilderName, serviceAccountName)
builtImages[generateRebuild(&ctx, t, cfg, clients, registryCacheConfig, testNamespace, clusterBuilderName, serviceAccountName)] = struct{}{}
})
}

func generateRebuild(ctx *context.Context, t *testing.T, cfg config, clients *clients, cacheConfig *buildapi.ImageCacheConfig, testNamespace, clusterBuilderName, serviceAccountName string) {
func generateRebuild(ctx *context.Context, t *testing.T, cfg config, clients *clients, cacheConfig *buildapi.ImageCacheConfig, testNamespace, clusterBuilderName, serviceAccountName string) string {
expectedResources := corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("1G"),
Expand Down Expand Up @@ -441,7 +443,7 @@ func generateRebuild(ctx *context.Context, t *testing.T, cfg config, clients *cl
}, metav1.CreateOptions{})
require.NoError(t, err)

validateImageCreate(t, clients, image, expectedResources)
originalImageTag := validateImageCreate(t, clients, image, expectedResources)

list, err := clients.client.KpackV1alpha2().Builds(testNamespace).List(*ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("image.kpack.io/image=%s", imageName),
Expand All @@ -461,6 +463,11 @@ func generateRebuild(ctx *context.Context, t *testing.T, cfg config, clients *cl
require.NoError(t, err)
return len(list.Items) == 2
}, 5*time.Second, 1*time.Minute)

rebuiltImageTag := validateImageCreate(t, clients, image, expectedResources)
require.Equal(t, originalImageTag, rebuiltImageTag)

return originalImageTag
}

func readNamespaceLabelsFromEnv() map[string]string {
Expand Down Expand Up @@ -500,33 +507,40 @@ func waitUntilReady(t *testing.T, ctx context.Context, clients *clients, objects
}
}

func validateImageCreate(t *testing.T, clients *clients, image *buildapi.Image, expectedResources corev1.ResourceRequirements) {
// validateImageCreate waits for the given Image resource to build
// successfully, confirms the built image is fetchable from the registry,
// and returns its registry identifier so the caller can record it for
// cleanup. Build logs are tailed in the background to aid debugging.
// NOTE(review): expectedResources is currently unused in this body —
// presumably validated elsewhere before this refactor; confirm whether it
// can be dropped from the signature.
func validateImageCreate(t *testing.T, clients *clients, image *buildapi.Image, expectedResources corev1.ResourceRequirements) string {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Stream the image's build logs while we wait; cancel() stops the tail.
	logTail := &bytes.Buffer{}
	go func() {
		err := logs.NewBuildLogsClient(clients.k8sClient).TailImage(ctx, logTail, image.Name, image.Namespace)
		require.NoError(t, err)
	}()

	t.Logf("Waiting for image '%s' to be created", image.Name)
	waitUntilReady(t, ctx, clients, image)

	// Fetch by tag to prove the image actually landed in the registry;
	// the identifier (digest reference) is what cleanup later deletes.
	registryClient := &registry.Client{}
	_, identifier, err := registryClient.Fetch(authn.DefaultKeychain, image.Spec.Tag)
	require.NoError(t, err)

	eventually(t, func() bool {
		return strings.Contains(logTail.String(), "Build successful")
	}, 1*time.Second, 10*time.Second)

	buildList, err := clients.client.KpackV1alpha2().Builds(image.Namespace).List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("image.kpack.io/image=%s", image.Name),
	})
	require.NoError(t, err)

	podList, err := clients.k8sClient.CoreV1().Pods(image.Namespace).List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("image.kpack.io/image=%s", image.Name),
	})
	require.NoError(t, err)

	// Each Build should have produced exactly one pod (replaces the old
	// hard-coded expectation of a single pod).
	require.Len(t, podList.Items, len(buildList.Items))

	return identifier
}

func validateRebase(t *testing.T, ctx context.Context, clients *clients, imageName, testNamespace string) {
Expand Down Expand Up @@ -568,13 +582,22 @@ func validateRebase(t *testing.T, ctx context.Context, clients *clients, imageNa

// deleteImageTag best-effort deletes the given tag from its registry.
// It runs during test cleanup, so every failure is logged and swallowed
// rather than asserted: a leaked tag must not mask the real test result.
// (The flattened diff retained the superseded require.NoError calls
// alongside the new log-and-return handling; only the lenient handling
// belongs in the final code.)
func deleteImageTag(t *testing.T, deleteImageTag string) {
	reference, err := name.ParseReference(deleteImageTag, name.WeakValidation)
	if err != nil {
		t.Logf("error cleaning up: could not parse reference: %s", err)
		return
	}

	authenticator, err := authn.DefaultKeychain.Resolve(reference.Context().Registry)
	if err != nil {
		t.Logf("error cleaning up: could not resolve keychain to delete tag: %s", err)
		return
	}

	err = remote.Delete(reference, remote.WithAuth(authenticator))
	if err != nil {
		t.Logf("error cleaning up: could not delete reference: %s", err)
		return
	}
}

func deleteNamespace(t *testing.T, ctx context.Context, clients *clients, namespace string) {
Expand Down

0 comments on commit bd29085

Please sign in to comment.